diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4b4c37f399b7..0a9d37a05a5f 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -14,29 +14,14 @@ # limitations under the License. version: 2 updates: - # Maintain dependencies for GitHub Actions - - package-ecosystem: "npm" - directory: "hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/" - schedule: - interval: "weekly" - commit-message: - prefix: "[Recon] Dependabot Package Upgrade: " - groups: - minor-updates: - patterns: - - "*" - update-types: - - "minor" - - "patch" + - package-ecosystem: maven + directory: "/" ignore: - dependency-name: "*" update-types: ["version-update:semver-major"] - pull-request-branch-name: - separator: "-" - - - package-ecosystem: maven - directory: "/" schedule: interval: "weekly" + day: "saturday" + time: "07:00" # UTC pull-request-branch-name: separator: "-" diff --git a/hadoop-ozone/dist/src/main/smoketest/env-compose.robot b/.github/labeler.yml similarity index 51% rename from hadoop-ozone/dist/src/main/smoketest/env-compose.robot rename to .github/labeler.yml index d21eacaea50a..fc68079617a6 100644 --- a/hadoop-ozone/dist/src/main/smoketest/env-compose.robot +++ b/.github/labeler.yml @@ -13,20 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -*** Settings *** -Documentation High level utilities to execute commands and tests in docker-compose based environments. -Resource commonlib.robot -Test Timeout 5 minutes +# Configuration for .github/workflows/label-pr.yml -*** Keywords *** +# This rule can be deleted once the container reconciliation feature branch is merged. +container-reconciliation: +- base-branch: HDDS-10239-container-reconciliation -Run tests on host - [arguments] ${host} ${robotfile} - ${result} = Execute docker-compose exec ${host} robot smoketest/${robotfile} - -Execute on host - [arguments] ${host} ${command} - ${rc} ${output} = Run And Return Rc And Output docker-compose exec ${host} ${command} - Log ${output} - Should Be Equal As Integers ${rc} 0 - [return] ${output} diff --git a/.github/workflows/build-ratis.yml b/.github/workflows/build-ratis.yml new file mode 100644 index 000000000000..a6dd6b08adc1 --- /dev/null +++ b/.github/workflows/build-ratis.yml @@ -0,0 +1,137 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow can be called by other workflows to build Ratis. +# +# Inputs: +# - Ratis repo +# - the commit to build +# Outputs: +# - various version numbers that need to be provided to the Ozone build process. +# - Ratis repository is uploaded as an artifact named `ratis-jars` +# +# See `intermittent-test-check.yml` as an example use of this workflow. 
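+#
+# Minimal caller sketch (illustrative only; the repo/ref values below are the
+# declared defaults, shown here as assumptions -- see the `ratis` job in
+# `intermittent-test-check.yml` in this same change for the real usage):
+#
+#   ratis:
+#     uses: ./.github/workflows/build-ratis.yml
+#     with:
+#       repo: apache/ratis
+#       ref: master
+#   consumer:
+#     runs-on: ubuntu-latest
+#     needs: ratis
+#     steps:
+#       - run: echo "Built Ratis ${{ needs.ratis.outputs.ratis-version }}"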
+ +name: build-ratis +on: + workflow_call: + inputs: + repo: + description: Ratis repository + default: apache/ratis + required: true + type: string + ref: + description: Ratis ref (branch, tag or commit SHA) + default: master + required: true + type: string + outputs: + ratis-version: + description: "Ratis Version" + value: ${{ jobs.ratis.outputs.ratis-version }} + thirdparty-version: + description: "Ratis Third-Party Version" + value: ${{ jobs.ratis.outputs.thirdparty-version }} + grpc-version: + description: "gRPC Version" + value: ${{ jobs.ratis-thirdparty.outputs.grpc-version }} + netty-version: + description: "Netty Version" + value: ${{ jobs.ratis-thirdparty.outputs.netty-version }} + protobuf-version: + description: "Protobuf Version" + value: ${{ jobs.ratis-thirdparty.outputs.protobuf-version }} +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 +jobs: + ratis: + runs-on: ubuntu-20.04 + timeout-minutes: 60 + outputs: + ratis-version: ${{ steps.versions.outputs.ratis }} + thirdparty-version: ${{ steps.versions.outputs.thirdparty }} + steps: + - name: Checkout project + uses: actions/checkout@v4 + with: + repository: ${{ inputs.repo }} + ref: ${{ inputs.ref }} + - name: Cache for maven dependencies + uses: actions/cache@v4 + with: + path: | + ~/.m2/repository + !~/.m2/repository/org/apache/ratis + key: ratis-dependencies-${{ hashFiles('**/pom.xml') }} + - name: Setup java + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: 8 + - name: Get component versions + id: versions + run: | + thirdparty_version="$(mvn help:evaluate -N -q -DforceStdout -Dexpression=ratis.thirdparty.version)" + echo "thirdparty=${thirdparty_version}" >> $GITHUB_OUTPUT + + ratis_sha=$(git rev-parse --short HEAD) + ratis_version="$(mvn help:evaluate -N -q -DforceStdout -Dexpression=project.version | sed -e "s/-SNAPSHOT/-${ratis_sha}-SNAPSHOT/")" + echo "ratis=${ratis_version}" >> $GITHUB_OUTPUT + - name: Run a full build + run: | + mvn versions:set -DnewVersion=${{ steps.versions.outputs.ratis }} + dev-support/checks/build.sh + - name: Store Maven repo for tests + uses: actions/upload-artifact@v4 + with: + name: ratis-jars + path: | + ~/.m2/repository/org/apache/ratis + retention-days: 1 + ratis-thirdparty: + runs-on: ubuntu-20.04 + needs: + - ratis + timeout-minutes: 30 + outputs: + grpc-version: ${{ steps.versions.outputs.grpc }} + netty-version: ${{ steps.versions.outputs.netty }} + protobuf-version: ${{ steps.versions.outputs.protobuf }} + steps: + - name: Checkout project + uses: actions/checkout@v4 + with: + repository: apache/ratis-thirdparty + ref: ${{ needs.ratis.outputs.thirdparty-version }} + - name: Get component versions + id: versions + run: | + echo "grpc=$(mvn help:evaluate -N -q -DforceStdout -Dexpression=shaded.grpc.version)" >> $GITHUB_OUTPUT + echo "netty=$(mvn help:evaluate -N -q -DforceStdout -Dexpression=shaded.netty.version)" >> $GITHUB_OUTPUT + echo "protobuf=$(mvn help:evaluate -N -q -DforceStdout -Dexpression=shaded.protobuf.version)" >> $GITHUB_OUTPUT + debug: + runs-on: ubuntu-20.04 + needs: + - ratis + - ratis-thirdparty + steps: + - name: Print versions + run: | + echo ${{ needs.ratis.outputs.ratis-version }} + echo ${{ needs.ratis.outputs.thirdparty-version }} + echo ${{ needs.ratis-thirdparty.outputs.grpc-version }} + echo ${{ needs.ratis-thirdparty.outputs.netty-version }} + echo ${{ needs.ratis-thirdparty.outputs.protobuf-version }} diff --git 
a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 74c4756cfd04..cc07865f7f17 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,9 +27,8 @@ jobs: outputs: acceptance-suites: ${{ steps.acceptance-suites.outputs.suites }} needs-basic-check: ${{ steps.categorize-basic-checks.outputs.needs-basic-check }} - needs-unit-check: ${{ steps.categorize-basic-checks.outputs.needs-unit-check }} + needs-native-check: ${{ steps.categorize-basic-checks.outputs.needs-native-check }} basic-checks: ${{ steps.categorize-basic-checks.outputs.basic-checks }} - unit-checks: ${{ steps.categorize-basic-checks.outputs.unit-checks }} needs-build: ${{ steps.selective-checks.outputs.needs-build }} needs-compile: ${{ steps.selective-checks.outputs.needs-compile }} needs-compose-tests: ${{ steps.selective-checks.outputs.needs-compose-tests }} @@ -92,10 +91,10 @@ jobs: restore-keys: | ${{ runner.os }}-pnpm- - name: Cache for maven dependencies - uses: actions/cache@v4 + uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -164,7 +163,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -202,7 +201,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -228,17 +227,13 @@ jobs: name: ${{ matrix.check }} path: target/${{ matrix.check }} continue-on-error: true - unit: + native: needs: - build-info - basic runs-on: ubuntu-20.04 timeout-minutes: 150 - if: needs.build-info.outputs.needs-unit-check == 'true' - strategy: - matrix: - check: ${{ fromJson(needs.build-info.outputs.unit-checks) }} - fail-fast: false + if: needs.build-info.outputs.needs-native-check == 'true' steps: - name: Checkout project uses: actions/checkout@v4 @@ -246,7 +241,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -257,19 +252,19 @@ jobs: distribution: 'temurin' java-version: 8 - name: Execute tests - run: hadoop-ozone/dev-support/checks/${{ matrix.check }}.sh + run: hadoop-ozone/dev-support/checks/${{ github.job }}.sh continue-on-error: true env: GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures - run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt + run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} with: - name: ${{ matrix.check }} - path: target/${{ matrix.check }} + name: ${{ github.job }} + path: target/${{ github.job }} continue-on-error: true dependency: needs: @@ -314,7 +309,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -434,12 +429,14 @@ jobs: matrix: profile: - client - - contract + - container - filesystem - hdds - om - ozone + - recon - shell + - snapshot - flaky fail-fast: false steps: @@ -449,31 +446,19 @@ jobs: uses: actions/cache/restore@v4 with: path: | - 
~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | maven-repo- - - name: Download Ozone repo - id: download-ozone-repo - uses: actions/download-artifact@v4 - with: - name: ozone-repo - path: | - ~/.m2/repository/org/apache/ozone - continue-on-error: true - name: Setup java uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 8 + java-version: 17 - name: Execute tests continue-on-error: true run: | - if [[ -e "${{ steps.download-ozone-repo.outputs.download-path }}" ]]; then - export OZONE_REPO_CACHED=true - fi - args= if [[ "${{ matrix.profile }}" == "flaky" ]]; then args="-Dsurefire.rerunFailingTestsCount=5 -Dsurefire.fork.timeout=3600" @@ -497,9 +482,9 @@ jobs: timeout-minutes: 30 if: github.repository == 'apache/ozone' && github.event_name != 'pull_request' needs: - - unit - acceptance - integration + - native steps: - name: Checkout project uses: actions/checkout@v4 @@ -509,7 +494,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | diff --git a/.github/workflows/dependabot-ci.yml b/.github/workflows/dependabot-ci.yml deleted file mode 100644 index aa216d1c58a6..000000000000 --- a/.github/workflows/dependabot-ci.yml +++ /dev/null @@ -1,78 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-name: dependabot-ci -on: - pull_request: - types: [opened, synchronize] -concurrency: - group: dependabot-ci-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} -jobs: - dependabot-check: - if: ${{ github.event_name == 'pull_request' && github.actor == 'dependabot[bot]' && startsWith(github.event.pull_request.title, '[Recon] Dependabot Package Upgrade') }} - runs-on: ubuntu-20.04 - env: - CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - NODE_VERSION: 16.14.2 - PNPM_VERSION: 7.33.6 - cURL_LOC: /usr/bin/curl - cURL_ARGS: -fsSL - PNPM_URL: https://get.pnpm.io/install.sh - timeout-minutes: 15 - permissions: - contents: write - steps: - - name: Extract branch name - shell: bash - run: echo "branch_name=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT - id: get_branch_name - - name: Checkout dependabot branch - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - path: ozone - - name: Delete the lockfile - working-directory: ozone - run: | - #Delete the lockfile created by dependabot - rm -rf hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml - - name: Install NodeJS v${{ env.NODE_VERSION }} - uses: actions/setup-node@v4 - with: - node-version: ${{ env.NODE_VERSION }} - - name: Install pnpm v${{ env.PNPM_VERSION }} and recreate lockfile - working-directory: ozone - shell: bash - run: | - # Install PNPM and recreate lockfile - echo "Fetching pnpm from $PNPM_URL with version $PNPM_VERSION" - $cURL_LOC $cURL_ARGS $PNPM_URL | env PNPM_VERSION=$PNPM_VERSION SHELL="$(which sh)" ENV="$HOME/.shrc" sh - - source /home/runner/.shrc - PNPM_EXEC=$(which pnpm) - echo "pnpm is present at: $PNPM_EXEC" - $PNPM_EXEC config set store-dir ~/.pnpm-store - cd hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/ - $PNPM_EXEC install --lockfile-only - - name: Commit generated lockfile - working-directory: ozone - run: | - OZONE_SHA=$(git -C ./ rev-parse HEAD) - cd hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/ - git add ./pnpm-lock.yaml - git status - git config --global user.name 'Github Actions' - git config --global user.email 'noreply@github.com' - git commit -m "[auto] Generated pnpm-lock from actions for $OZONE_SHA" || true - git push origin HEAD:${{ steps.get_branch_name.outputs.branch_name }} diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index 3239215aa907..100e9ab24944 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -40,12 +40,22 @@ on: description: Stop after first failure default: false required: true + ratis-repo: + description: Ratis repository + default: '' + required: false + ratis-ref: + description: Ratis ref (branch, tag or commit SHA) + default: '' + required: false env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 TEST_CLASS: ${{ github.event.inputs.test-class}} TEST_METHOD: ${{ github.event.inputs.test-name }} ITERATIONS: ${{ github.event.inputs.iterations }} FAIL_FAST: ${{ github.event.inputs.fail-fast }} + RATIS_REPO: ${{ github.event.inputs.ratis-repo }} + RATIS_VERSION: ${{ github.event.inputs.ratis-ref }} run-name: ${{ github.event_name == 'workflow_dispatch' && format('{0}#{1}[{2}]-{3}x{4}', inputs.test-class, inputs.test-name, inputs.ref, inputs.splits, inputs.iterations) || '' }} 
jobs: prepare-job: @@ -66,30 +76,55 @@ jobs: printf -v x "%s," "${splits[@]}" split_matrix="[${x%,}]" echo "matrix=$split_matrix" >> $GITHUB_OUTPUT + ratis: + uses: ./.github/workflows/build-ratis.yml + if: ${{ github.event.inputs.ratis-ref != '' }} + with: + repo: ${{ github.event.inputs.ratis-repo || format('{0}/ratis', github.repository_owner) }} + ref: ${{ github.event.inputs.ratis-ref }} build: + if: ${{ always() }} needs: - prepare-job + - ratis runs-on: ubuntu-20.04 timeout-minutes: 60 steps: - name: Checkout project uses: actions/checkout@v4 - name: Cache for maven dependencies - uses: actions/cache@v4 + uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | maven-repo- + - name: Download Ratis repo + if: ${{ github.event.inputs.ratis-ref != '' }} + uses: actions/download-artifact@v4 + with: + name: ratis-jars + path: | + ~/.m2/repository/org/apache/ratis - name: Setup java uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 - name: Build (most) of Ozone - run: hadoop-ozone/dev-support/checks/build.sh -Dskip.npx -Dskip.installnpx -DskipShade + run: | + args="-Dskip.npx -Dskip.installnpx -DskipShade" + if [[ "${{ github.event.inputs.ratis-ref }}" != "" ]]; then + args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}" + args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}" + args="$args -Dio.grpc.version=${{ needs.ratis.outputs.grpc-version }}" + args="$args -Dnetty.version=${{ needs.ratis.outputs.netty-version }}" + args="$args -Dgrpc.protobuf-compile.version=${{ needs.ratis.outputs.protobuf-version }}" + fi + + hadoop-ozone/dev-support/checks/build.sh $args - name: Store Maven repo for tests uses: actions/upload-artifact@v4 with: @@ -98,8 +133,10 @@ jobs: ~/.m2/repository/org/apache/ozone retention-days: 1 run-test: + if: ${{ always() }} needs: - prepare-job + - ratis - build name: Run-Split runs-on: ubuntu-20.04 @@ -115,11 +152,18 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | maven-repo- + - name: Download Ratis repo + if: ${{ github.event.inputs.ratis-ref != '' }} + uses: actions/download-artifact@v4 + with: + name: ratis-jars + path: | + ~/.m2/repository/org/apache/ratis - name: Download Ozone repo id: download-ozone-repo uses: actions/download-artifact@v4 @@ -140,6 +184,14 @@ jobs: fi args="-DexcludedGroups=native|slow|unhealthy" + if [[ "${{ github.event.inputs.ratis-ref }}" != "" ]]; then + args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}" + args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}" + args="$args -Dio.grpc.version=${{ needs.ratis.outputs.grpc-version }}" + args="$args -Dnetty.version=${{ needs.ratis.outputs.netty-version }}" + args="$args -Dgrpc.protobuf-compile.version=${{ needs.ratis.outputs.protobuf-version }}" + fi + if [ "$TEST_METHOD" = "ALL" ]; then echo "Running all tests from $TEST_CLASS" set -x diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml new file mode 100644 index 000000000000..abc620b7ef09 --- /dev/null +++ b/.github/workflows/label-pr.yml @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow reads its configuration from the .github/labeler.yml file. +name: pull-request-labeler +on: +- pull_request_target + +jobs: + labeler: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 + diff --git a/.github/workflows/populate-cache.yml b/.github/workflows/populate-cache.yml new file mode 100644 index 000000000000..d4c9cd8120ab --- /dev/null +++ b/.github/workflows/populate-cache.yml @@ -0,0 +1,74 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow creates cache with Maven dependencies for Ozone build. 
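+#
+# Consumers are expected to restore this cache read-only, roughly as sketched
+# below (the path and key mirror what ci.yml restores elsewhere in this change):
+#
+#   - name: Cache for maven dependencies
+#     uses: actions/cache/restore@v4
+#     with:
+#       path: |
+#         ~/.m2/repository/*/*/*
+#         !~/.m2/repository/org/apache/ozone
+#       key: maven-repo-${{ hashFiles('**/pom.xml') }}
+#       restore-keys: |
+#         maven-repo-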
+ +name: populate-cache + +on: + push: + branches: + - master + - ozone-1.4 + paths: + - 'pom.xml' + - '**/pom.xml' + - '.github/workflows/populate-cache.yml' + schedule: + - cron: '20 3 * * *' + +jobs: + build: + runs-on: ubuntu-20.04 + steps: + - name: Checkout project + uses: actions/checkout@v4 + + - name: Restore cache for Maven dependencies + id: restore-cache + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + + - name: Setup Java + if: steps.restore-cache.outputs.cache-hit != 'true' + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: 8 + + - name: Fetch dependencies + if: steps.restore-cache.outputs.cache-hit != 'true' + run: mvn --batch-mode --fail-never --no-transfer-progress --show-version -Pgo-offline -Pdist clean verify + + - name: Delete Ozone jars from repo + if: steps.restore-cache.outputs.cache-hit != 'true' + run: rm -fr ~/.m2/repository/org/apache/ozone + + - name: List repo contents + if: steps.restore-cache.outputs.cache-hit != 'true' + run: find ~/.m2/repository -type f | sort | xargs ls -lh + + - name: Save cache for Maven dependencies + if: steps.restore-cache.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} diff --git a/.github/workflows/repeat-acceptance.yml b/.github/workflows/repeat-acceptance.yml index 7269a9c417a6..74ef6b87c2fc 100644 --- a/.github/workflows/repeat-acceptance.yml +++ b/.github/workflows/repeat-acceptance.yml @@ -81,6 +81,8 @@ jobs: steps: - name: Checkout project uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.ref }} - name: Cache for npm dependencies uses: actions/cache@v4 with: @@ -91,9 +93,11 @@ jobs: restore-keys: | ${{ runner.os }}-pnpm- - name: Cache for maven dependencies - uses: actions/cache@v4 + uses: actions/cache/restore@v4 with: - path: ~/.m2/repository + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }}-${{ env.JAVA_VERSION }} restore-keys: | maven-repo-${{ hashFiles('**/pom.xml') }} @@ -115,12 +119,6 @@ jobs: hadoop-ozone/dist/target/ozone-*.tar.gz !hadoop-ozone/dist/target/ozone-*-src.tar.gz retention-days: 1 - - name: Delete temporary build artifacts before caching - run: | - #Never cache local artifacts - rm -rf ~/.m2/repository/org/apache/ozone/hdds* - rm -rf ~/.m2/repository/org/apache/ozone/ozone* - if: always() acceptance: needs: - prepare-job diff --git a/.gitignore b/.gitignore index 61a3d80e87a0..4cf4dd86288f 100644 --- a/.gitignore +++ b/.gitignore @@ -29,46 +29,13 @@ azure-bfs-auth-keys.xml */.externalToolBuilders */maven-eclipse.xml -hadoop-common-project/hadoop-kms/downloads/ -hadoop-hdfs-project/hadoop-hdfs/downloads -hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/dist -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tmp -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/node -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/node_modules -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower_components -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.sass-cache -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/connect.lock 
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/coverage/* -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/libpeerconnection.log -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/npm-debug.log -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp -yarnregistry.pdf -patchprocess/ -.history/ -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log - -# Ignore files generated by HDDS acceptance tests. -hadoop-ozone/acceptance-test/docker-compose.log -hadoop-ozone/acceptance-test/junit-results.xml - #robotframework outputs log.html output.xml report.html -hadoop-hdds/docs/public -hadoop-hdds/docs/.hugo_build.lock -hadoop-ozone/recon/node_modules - .dev-tools dev-support/ci/bats-assert dev-support/ci/bats-support -hadoop-ozone/dist/src/main/license/current.txt - -.mvn/.gradle-enterprise/ \ No newline at end of file +.mvn/.gradle-enterprise/ diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index 4ac67491e726..15e487eec6bf 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -24,11 +24,11 @@ com.gradle gradle-enterprise-maven-extension - 1.20 + 1.20.1 com.gradle common-custom-user-data-maven-extension - 1.12.4 + 1.13 diff --git a/LICENSE.txt b/LICENSE.txt index 021266844b82..8a367a318628 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -213,8 +213,6 @@ Apache License 2.0 hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js -hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java -hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java BSD 3-Clause diff --git a/SECURITY.md b/SECURITY.md index 2f92dd685c12..3a89968026a2 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -5,13 +5,16 @@ The first stable release of Apache Ozone is 1.0, the previous alpha and beta releases are not supported by the community. 
| Version | Supported | -| ------------- | ------------------ | +|---------------| ------------------ | | 0.3.0 (alpha) | :x: | | 0.4.0 (alpha) | :x: | | 0.4.1 (alpha) | :x: | | 0.5.0 (beta) | :x: | -| 1.0 | :white_check_mark: | -| 1.1 | :white_check_mark: | +| 1.0.0 | :x: | +| 1.1.0 | :x: | +| 1.2.1 | :x: | +| 1.3.0 | :x: | +| 1.4.0 | :white_check_mark: | ## Reporting a Vulnerability diff --git a/dev-support/ci/categorize_basic_checks.sh b/dev-support/ci/categorize_basic_checks.sh index d32fe169a2c0..9b4adf264089 100755 --- a/dev-support/ci/categorize_basic_checks.sh +++ b/dev-support/ci/categorize_basic_checks.sh @@ -26,38 +26,27 @@ ALL_BASIC_CHECKS="${ALL_BASIC_CHECKS[@]%\]}" # Replace commas with spaces to form a space-delimited list SPACE_DELIMITED_ALL_CHECKS=$(echo "$ALL_BASIC_CHECKS" | tr -d '"' | tr ',' ' ') -BASIC_CHECKS=$(grep -lr '^#checks:basic' hadoop-ozone/dev-support/checks \ - | sort -u | xargs -n1 basename \ - | cut -f1 -d'.') - -UNIT_CHECKS=$(grep -lr '^#checks:unit' hadoop-ozone/dev-support/checks \ - | sort -u | xargs -n1 basename \ - | cut -f1 -d'.') - if [[ -n "${SPACE_DELIMITED_ALL_CHECKS}" ]]; then - SPACE_DELIMITED_ALL_CHECKS=" ${SPACE_DELIMITED_ALL_CHECKS[*]} " # add framing blanks - basic=() - for item in ${BASIC_CHECKS[@]}; do - if [[ $SPACE_DELIMITED_ALL_CHECKS =~ " $item " ]] ; then # use $item as regexp - basic+=($item) + # add framing blanks + SPACE_DELIMITED_ALL_CHECKS=" ${SPACE_DELIMITED_ALL_CHECKS[*]} " + + for check in basic native; do + CHECKS=$(grep -lr "^#checks:${check}$" hadoop-ozone/dev-support/checks \ + | sort -u \ + | xargs -n1 basename \ + | cut -f1 -d'.') + + check_list=() + for item in ${CHECKS[@]}; do + # use $item as regex + if [[ $SPACE_DELIMITED_ALL_CHECKS =~ " $item " ]] ; then + check_list+=($item) fi done - if [[ -n "${basic[@]}" ]]; then - initialization::ga_output needs-basic-check "true" + if [[ -n "${check_list[@]}" ]]; then + initialization::ga_output "needs-${check}-check" "true" fi - initialization::ga_output basic-checks \ - "$(initialization::parameters_to_json ${basic[@]})" - - unit=() - for item in ${UNIT_CHECKS[@]}; do - if [[ $SPACE_DELIMITED_ALL_CHECKS =~ " $item " ]] ; then # use $item as regexp - unit+=($item) - fi - done - if [[ -n "${unit[@]}" ]]; then - initialization::ga_output needs-unit-check "true" - fi - initialization::ga_output unit-checks \ - "$(initialization::parameters_to_json ${unit[@]})" + initialization::ga_output "${check}-checks" \ + "$(initialization::parameters_to_json ${check_list[@]})" + done fi - diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index 9fe1708c9137..a95a981bdd3e 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -57,6 +57,18 @@ load bats-assert/load.bash assert_output -p needs-kubernetes-tests=false } +@test "dashboard only" { + run dev-support/ci/selective_ci_checks.sh 039dea9 + + assert_output -p 'basic-checks=["rat"]' + assert_output -p needs-build=false + assert_output -p needs-compile=false + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=false + assert_output -p needs-kubernetes-tests=false +} + @test "compose and robot" { run dev-support/ci/selective_ci_checks.sh b83039eef @@ -96,7 +108,7 @@ load bats-assert/load.bash @test "integration and unit: java change" { run dev-support/ci/selective_ci_checks.sh 9aebf6e25 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + 
assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -108,7 +120,7 @@ load bats-assert/load.bash @test "integration and unit: script change" { run dev-support/ci/selective_ci_checks.sh c6850484f - assert_output -p 'basic-checks=["rat","bats","unit"]' + assert_output -p 'basic-checks=["rat","bats"]' assert_output -p needs-build=false assert_output -p needs-compile=false assert_output -p needs-compose-tests=false @@ -120,7 +132,7 @@ load bats-assert/load.bash @test "script change including junit.sh" { run dev-support/ci/selective_ci_checks.sh 66093e52c6 - assert_output -p 'basic-checks=["rat","bats","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","bats","checkstyle","findbugs","native"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -132,19 +144,19 @@ load bats-assert/load.bash @test "unit only" { run dev-support/ci/selective_ci_checks.sh 1dd1d0ba3 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false assert_output -p needs-dependency-check=false - assert_output -p needs-integration-tests=false + assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @test "unit helper" { run dev-support/ci/selective_ci_checks.sh 88383d1d5 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -177,20 +189,17 @@ load bats-assert/load.bash assert_output -p needs-kubernetes-tests=false } -# disabled, because this test fails if -# hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java -# is not present in the current tree (i.e. 
if file is renamed, moved or deleted) -#@test "native test in other module" { -# run dev-support/ci/selective_ci_checks.sh 7d01cc14a6 -# -# assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","native","unit"]' -# assert_output -p needs-build=true -# assert_output -p needs-compile=true -# assert_output -p needs-compose-tests=false -# assert_output -p needs-dependency-check=false -# assert_output -p needs-integration-tests=false -# assert_output -p needs-kubernetes-tests=false -#} +@test "native test in other module" { + run dev-support/ci/selective_ci_checks.sh 822c0dee1a + + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","native"]' + assert_output -p needs-build=true + assert_output -p needs-compile=true + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=false + assert_output -p needs-kubernetes-tests=false +} @test "kubernetes only" { run dev-support/ci/selective_ci_checks.sh 5336bb9bd @@ -219,7 +228,7 @@ load bats-assert/load.bash @test "main/java change" { run dev-support/ci/selective_ci_checks.sh 86a771dfe - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -231,7 +240,7 @@ load bats-assert/load.bash @test "..../java change" { run dev-support/ci/selective_ci_checks.sh 01c616536 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -243,7 +252,7 @@ load bats-assert/load.bash @test "java and compose change" { run dev-support/ci/selective_ci_checks.sh d0f0f806e - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","native"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -255,7 +264,7 @@ load bats-assert/load.bash @test "java and docs change" { run dev-support/ci/selective_ci_checks.sh 2c0adac26 - assert_output -p 'basic-checks=["rat","author","checkstyle","docs","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","docs","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -267,7 +276,7 @@ load bats-assert/load.bash @test "pom change" { run dev-support/ci/selective_ci_checks.sh 9129424a9 - assert_output -p 'basic-checks=["rat","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","checkstyle","findbugs","native"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -279,7 +288,7 @@ load bats-assert/load.bash @test "CI lib change" { run dev-support/ci/selective_ci_checks.sh ceb79acaa - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -291,7 +300,7 @@ load bats-assert/load.bash @test "CI workflow change" { run 
dev-support/ci/selective_ci_checks.sh 90a8d7c01 - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -316,7 +325,7 @@ load bats-assert/load.bash @test "CI workflow change (ci.yaml)" { run dev-support/ci/selective_ci_checks.sh 90fd5f2adc - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index 996bd382be36..e512b4a5d626 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -233,6 +233,7 @@ function get_count_compose_files() { local ignore_array=( "^hadoop-ozone/dist/src/main/k8s" "^hadoop-ozone/dist/src/main/license" + "^hadoop-ozone/dist/src/main/compose/common/grafana/dashboards" "\.md$" ) filter_changed_files true @@ -262,18 +263,10 @@ function get_count_integration_files() { "^hadoop-ozone/integration-test" "^hadoop-ozone/fault-injection-test/mini-chaos-tests" "src/test/java" + "src/test/resources" ) - # Ozone's unit test naming convention: Test*.java - # The following makes this filter ignore all tests except those in - # integration-test and fault-injection-test. - # Directories starting with `i` under hadoop-ozone need to be listed - # explicitly, other subdirectories are captured by the second item. 
local ignore_array=( - "^hadoop-hdds/.*/src/test/java/.*/Test.*.java" - "^hadoop-ozone/[a-eghj-z].*/src/test/java/.*/Test.*.java" - "^hadoop-ozone/insight/src/test/java/.*/Test.*.java" - "^hadoop-ozone/interface-client/src/test/java/.*/Test.*.java" - "^hadoop-ozone/interface-storage/src/test/java/.*/Test.*.java" + $(grep -Flr 'org.apache.ozone.test.tag.Native' hadoop-ozone/integration-test) ) filter_changed_files true COUNT_INTEGRATION_CHANGED_FILES=${match_count} @@ -313,7 +306,6 @@ function check_needs_build() { start_end::group_start "Check if build is needed" local pattern_array=( "^hadoop-ozone/dev-support/checks/build.sh" - "^hadoop-ozone/dev-support/checks/native_check.sh" "src/main/java" "src/main/resources" ) @@ -450,29 +442,24 @@ function check_needs_native() { if [[ ${match_count} != "0" ]]; then add_basic_check native - fi - - start_end::group_end -} - -function check_needs_unit_test() { - start_end::group_start "Check if unit test is needed" - local pattern_array=( - "^hadoop-ozone/dev-support/checks/_mvn_unit_report.sh" - "^hadoop-ozone/dev-support/checks/unit.sh" - "^hadoop-ozone/dev-support/checks/junit.sh" - "src/test/java" - "src/test/resources" - ) - local ignore_array=( - "^hadoop-ozone/dist" - "^hadoop-ozone/fault-injection-test/mini-chaos-tests" - "^hadoop-ozone/integration-test" - ) - filter_changed_files true - - if [[ ${match_count} != "0" ]]; then - add_basic_check unit + else + local pattern_array=( + "^hadoop-ozone/dev-support/checks/junit.sh" + # dependencies + "^hadoop-hdds/annotations" + "^hadoop-hdds/common" + "^hadoop-hdds/config" + "^hadoop-hdds/hadoop-dependency-client" + "^hadoop-hdds/hadoop-dependency-test" + "^hadoop-hdds/managed-rocksdb" + "^hadoop-hdds/test-utils" + "^pom.xml" + ) + filter_changed_files + + if [[ ${match_count} != "0" ]]; then + add_basic_check native + fi fi start_end::group_end @@ -494,12 +481,14 @@ function get_count_misc_files() { "\.md$" "findbugsExcludeFile.xml" "/NOTICE$" + "^hadoop-ozone/dist/src/main/compose/common/grafana/dashboards" ) local ignore_array=( "^.github/workflows/post-commit.yml" "^hadoop-ozone/dev-support/checks/_mvn_unit_report.sh" "^hadoop-ozone/dev-support/checks/acceptance.sh" "^hadoop-ozone/dev-support/checks/integration.sh" + "^hadoop-ozone/dev-support/checks/junit.sh" "^hadoop-ozone/dev-support/checks/kubernetes.sh" ) filter_changed_files true @@ -532,7 +521,6 @@ function calculate_test_types_to_run() { compose_tests_needed=true integration_tests_needed=true kubernetes_tests_needed=true - add_basic_check unit else echo "All ${COUNT_ALL_CHANGED_FILES} changed files are known to be handled by specific checks." echo @@ -612,6 +600,5 @@ check_needs_dependency check_needs_docs check_needs_findbugs check_needs_native -check_needs_unit_test calculate_test_types_to_run set_outputs diff --git a/dev-support/rat/rat-exclusions.txt b/dev-support/rat/rat-exclusions.txt new file mode 100644 index 000000000000..4531b1b601c0 --- /dev/null +++ b/dev-support/rat/rat-exclusions.txt @@ -0,0 +1,73 @@ +###### Licensed to the Apache Software Foundation (ASF) under one +###### or more contributor license agreements. See the NOTICE file +###### distributed with this work for additional information +###### regarding copyright ownership. The ASF licenses this file +###### to you under the Apache License, Version 2.0 (the +###### "License"); you may not use this file except in compliance +###### with the License. 
You may obtain a copy of the License at +###### +###### http://www.apache.org/licenses/LICENSE-2.0 +###### +###### Unless required by applicable law or agreed to in writing, +###### software distributed under the License is distributed on an +###### "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +###### KIND, either express or implied. See the License for the +###### specific language governing permissions and limitations +###### under the License. + +**/*.json +.gitattributes +.github/* +CONTRIBUTING.md +README.md +SECURITY.md + +# hadoop-hdds/interface-client +src/main/resources/proto.lock + +# tools/fault-injection-service +tools/fault-injection-service/README.md + +# hadoop-hdds/framework +**/webapps/static/angular-1.8.0.min.js +**/webapps/static/angular-nvd3-1.0.9.min.js +**/webapps/static/angular-route-1.8.0.min.js +**/webapps/static/bootstrap-3.4.1/** +**/webapps/static/d3-3.5.17.min.js +**/webapps/static/jquery-3.5.1.min.js +**/webapps/static/nvd3-1.8.5.min.css +**/webapps/static/nvd3-1.8.5.min.css.map +**/webapps/static/nvd3-1.8.5.min.js +**/webapps/static/nvd3-1.8.5.min.js.map + +# hadoop-hdds/container-service +src/test/resources/123-dn-container.db/** +src/test/resources/123.container +src/test/resources/additionalfields.container +src/test/resources/incorrect.checksum.container +src/test/resources/incorrect.container +src/test/resources/test.db.ini + +# hadoop-hdds/docs +**/themes/ozonedoc/** +static/slides/* + +# hadoop-ozone/dist +**/.ssh/id_rsa* +**/log.html +**/output.xml +**/report.html +src/main/license/** + +# hadoop-ozone/integration-test +src/test/resources/ssl/* + +# hadoop-ozone/recon +**/pnpm-lock.yaml +src/test/resources/prometheus-test-response.txt + +# hadoop-ozone/shaded +**/dependency-reduced-pom.xml + +# hadoop-ozone/tools +src/test/resources/*.log \ No newline at end of file diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java index 1045f7a6a172..422943fff042 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java @@ -27,6 +27,7 @@ import org.apache.hadoop.metrics2.lib.Interns; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.ozone.OzoneConsts; import java.util.Map; @@ -51,6 +52,11 @@ public final class ContainerClientMetrics { private MutableCounterLong totalWriteChunkCalls; @Metric private MutableCounterLong totalWriteChunkBytes; + private MutableQuantiles[] listBlockLatency; + private MutableQuantiles[] getBlockLatency; + private MutableQuantiles[] getCommittedBlockLengthLatency; + private MutableQuantiles[] readChunkLatency; + private MutableQuantiles[] getSmallFileLatency; private final Map writeChunkCallsByPipeline; private final Map writeChunkBytesByPipeline; private final Map writeChunksCallsByLeaders; @@ -84,6 +90,36 @@ private ContainerClientMetrics() { writeChunkCallsByPipeline = new ConcurrentHashMap<>(); writeChunkBytesByPipeline = new ConcurrentHashMap<>(); writeChunksCallsByLeaders = new ConcurrentHashMap<>(); + + listBlockLatency = new MutableQuantiles[3]; + getBlockLatency = new MutableQuantiles[3]; + getCommittedBlockLengthLatency = new MutableQuantiles[3]; + readChunkLatency = new MutableQuantiles[3]; + 
getSmallFileLatency = new MutableQuantiles[3]; + int[] intervals = {60, 300, 900}; + for (int i = 0; i < intervals.length; i++) { + int interval = intervals[i]; + listBlockLatency[i] = registry + .newQuantiles("listBlockLatency" + interval + + "s", "ListBlock latency in microseconds", "ops", + "latency", interval); + getBlockLatency[i] = registry + .newQuantiles("getBlockLatency" + interval + + "s", "GetBlock latency in microseconds", "ops", + "latency", interval); + getCommittedBlockLengthLatency[i] = registry + .newQuantiles("getCommittedBlockLengthLatency" + interval + + "s", "GetCommittedBlockLength latency in microseconds", + "ops", "latency", interval); + readChunkLatency[i] = registry + .newQuantiles("readChunkLatency" + interval + + "s", "ReadChunk latency in microseconds", "ops", + "latency", interval); + getSmallFileLatency[i] = registry + .newQuantiles("getSmallFileLatency" + interval + + "s", "GetSmallFile latency in microseconds", "ops", + "latency", interval); + } } public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { @@ -111,28 +147,64 @@ public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { totalWriteChunkBytes.incr(chunkSizeBytes); } + public void addListBlockLatency(long latency) { + for (MutableQuantiles q : listBlockLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetBlockLatency(long latency) { + for (MutableQuantiles q : getBlockLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetCommittedBlockLengthLatency(long latency) { + for (MutableQuantiles q : getCommittedBlockLengthLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addReadChunkLatency(long latency) { + for (MutableQuantiles q : readChunkLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetSmallFileLatency(long latency) { + for (MutableQuantiles q : getSmallFileLatency) { + if (q != null) { + q.add(latency); + } + } + } + @VisibleForTesting public MutableCounterLong getTotalWriteChunkBytes() { return totalWriteChunkBytes; } - @VisibleForTesting - public MutableCounterLong getTotalWriteChunkCalls() { + MutableCounterLong getTotalWriteChunkCalls() { return totalWriteChunkCalls; } - @VisibleForTesting - public Map getWriteChunkBytesByPipeline() { + Map getWriteChunkBytesByPipeline() { return writeChunkBytesByPipeline; } - @VisibleForTesting - public Map getWriteChunkCallsByPipeline() { + Map getWriteChunkCallsByPipeline() { return writeChunkCallsByPipeline; } - @VisibleForTesting - public Map getWriteChunksCallsByLeaders() { + Map getWriteChunksCallsByLeaders() { return writeChunksCallsByLeaders; } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index d1dcc654b100..b130f48776c1 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -144,22 +144,39 @@ public enum ChecksumCombineMode { tags = ConfigTag.CLIENT) private int retryInterval = 0; + @Config(key = "read.max.retries", + defaultValue = "3", + description = "Maximum number of retries by Ozone Client on " + + "encountering connectivity exception when reading a key.", + tags = ConfigTag.CLIENT) + private int maxReadRetryCount = 3; + + @Config(key = "read.retry.interval", + defaultValue = "1", + description = + "Indicates the time duration in seconds a 
client will wait " + + "before retrying a read key request on encountering " + + "a connectivity exception from Datanodes. " + + "By default the interval is 1 second.", + tags = ConfigTag.CLIENT) + private int readRetryInterval = 1; + @Config(key = "checksum.type", defaultValue = "CRC32", description = "The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5] " + "determines which algorithm would be used to compute checksum for " + "chunk data. Default checksum type is CRC32.", - tags = ConfigTag.CLIENT) + tags = { ConfigTag.CLIENT, ConfigTag.CRYPTO_COMPLIANCE }) private String checksumType = ChecksumType.CRC32.name(); @Config(key = "bytes.per.checksum", - defaultValue = "1MB", + defaultValue = "16KB", type = ConfigType.SIZE, description = "Checksum will be computed for every bytes per checksum " + "number of bytes and stored sequentially. The minimum value for " - + "this config is 16KB.", - tags = ConfigTag.CLIENT) - private int bytesPerChecksum = 1024 * 1024; + + "this config is 8KB.", + tags = { ConfigTag.CLIENT, ConfigTag.CRYPTO_COMPLIANCE }) + private int bytesPerChecksum = 16 * 1024; @Config(key = "verify.checksum", defaultValue = "true", @@ -201,6 +218,13 @@ public enum ChecksumCombineMode { // 3 concurrent stripe read should be enough. private int ecReconstructStripeReadPoolLimit = 10 * 3; + @Config(key = "ec.reconstruct.stripe.write.pool.limit", + defaultValue = "30", + description = "Thread pool max size for writing available EC chunks" + + " in parallel to reconstruct the whole stripe.", + tags = ConfigTag.CLIENT) + private int ecReconstructStripeWritePoolLimit = 10 * 3; + @Config(key = "checksum.combine.mode", defaultValue = "COMPOSITE_CRC", description = "The combined checksum type [MD5MD5CRC / COMPOSITE_CRC] " @@ -230,10 +254,18 @@ public enum ChecksumCombineMode { "list rather than full chunk list to optimize performance. 
" + "Critical to HBase.", tags = ConfigTag.CLIENT) - private boolean incrementalChunkList = false; + private boolean incrementalChunkList = true; + + @Config(key = "stream.putblock.piggybacking", + defaultValue = "false", + type = ConfigType.BOOLEAN, + description = "Allow PutBlock to be piggybacked in WriteChunk " + + "requests if the chunk is small.", + tags = ConfigTag.CLIENT) + private boolean enablePutblockPiggybacking = false; @PostConstruct - private void validate() { + public void validate() { Preconditions.checkState(streamBufferSize > 0); Preconditions.checkState(streamBufferFlushSize > 0); Preconditions.checkState(streamBufferMaxSize > 0); @@ -328,6 +360,22 @@ public void setRetryInterval(int retryInterval) { this.retryInterval = retryInterval; } + public int getMaxReadRetryCount() { + return maxReadRetryCount; + } + + public void setMaxReadRetryCount(int maxReadRetryCount) { + this.maxReadRetryCount = maxReadRetryCount; + } + + public int getReadRetryInterval() { + return readRetryInterval; + } + + public void setReadRetryInterval(int readRetryInterval) { + this.readRetryInterval = readRetryInterval; + } + public ChecksumType getChecksumType() { return ChecksumType.valueOf(checksumType); } @@ -396,6 +444,14 @@ public int getEcReconstructStripeReadPoolLimit() { return ecReconstructStripeReadPoolLimit; } + public void setEcReconstructStripeWritePoolLimit(int poolLimit) { + this.ecReconstructStripeWritePoolLimit = poolLimit; + } + + public int getEcReconstructStripeWritePoolLimit() { + return ecReconstructStripeWritePoolLimit; + } + public void setFsDefaultBucketLayout(String bucketLayout) { if (!bucketLayout.isEmpty()) { this.fsDefaultBucketLayout = bucketLayout; @@ -406,6 +462,14 @@ public String getFsDefaultBucketLayout() { return fsDefaultBucketLayout; } + public void setEnablePutblockPiggybacking(boolean enablePutblockPiggybacking) { + this.enablePutblockPiggybacking = enablePutblockPiggybacking; + } + + public boolean getEnablePutblockPiggybacking() { + return enablePutblockPiggybacking; + } + public boolean isDatastreamPipelineMode() { return datastreamPipelineMode; } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 0a38e6604897..cb2b85ef1e29 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -20,7 +20,9 @@ import java.io.IOException; import java.io.InterruptedIOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -42,6 +44,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -166,8 +169,8 @@ private synchronized void connectToDatanode(DatanodeDetails dn) // port. 
int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); if (port == 0) { - port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + port = config.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } // Add credential context to the client call @@ -384,6 +387,12 @@ private XceiverClientReply sendCommandWithRetry( } } + boolean allInService = datanodeList.stream() + .allMatch(dn -> dn.getPersistedOpState() == NodeOperationalState.IN_SERVICE); + if (!allInService) { + datanodeList = sortDatanodeByOperationalState(datanodeList); + } + for (DatanodeDetails dn : datanodeList) { try { if (LOG.isDebugEnabled()) { @@ -440,13 +449,37 @@ private XceiverClientReply sendCommandWithRetry( LOG.debug(message + " on the pipeline {}.", processForDebug(request), pipeline); } else { - LOG.error(message + " on the pipeline {}.", + LOG.warn(message + " on the pipeline {}.", request.getCmdType(), pipeline); } throw ioException; } } + private static List<DatanodeDetails> sortDatanodeByOperationalState( + List<DatanodeDetails> datanodeList) { + List<DatanodeDetails> sortedDatanodeList = new ArrayList<>(datanodeList); + // Make IN_SERVICE datanodes precede datanodes in all other states. + // This is a stable sort that preserves the relative order of the + // IN_SERVICE datanodes. + Comparator<DatanodeDetails> byOpStateStable = (first, second) -> { + boolean firstInService = first.getPersistedOpState() == + NodeOperationalState.IN_SERVICE; + boolean secondInService = second.getPersistedOpState() == + NodeOperationalState.IN_SERVICE; + + if (firstInService == secondInService) { + return 0; + } else if (firstInService) { + return -1; + } else { + return 1; + } + }; + sortedDatanodeList.sort(byOpStateStable); + return sortedDatanodeList; + } + @Override public XceiverClientReply sendCommandAsync( ContainerCommandRequestProto request) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java index 96db6d13fea5..ade4cbcab3c4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java @@ -19,21 +19,27 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableRate; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.util.PerformanceMetrics; +import org.apache.hadoop.util.PerformanceMetricsInitializer; /** * The client metrics for the Storage Container protocol. 
*/ @InterfaceAudience.Private @Metrics(about = "Storage Container Client Metrics", context = "dfs") -public class XceiverClientMetrics { +public class XceiverClientMetrics implements MetricsSource { public static final String SOURCE_NAME = XceiverClientMetrics.class .getSimpleName(); @@ -43,8 +49,11 @@ public class XceiverClientMetrics { private @Metric MutableCounterLong ecReconstructionFailsTotal; private MutableCounterLong[] pendingOpsArray; private MutableCounterLong[] opsArray; - private MutableRate[] containerOpsLatency; + private PerformanceMetrics[] containerOpsLatency; private MetricsRegistry registry; + private OzoneConfiguration conf = new OzoneConfiguration(); + private int[] intervals = conf.getInts(OzoneConfigKeys + .OZONE_XCEIVER_CLIENT_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); public XceiverClientMetrics() { init(); @@ -56,7 +65,7 @@ public void init() { this.pendingOpsArray = new MutableCounterLong[numEnumEntries]; this.opsArray = new MutableCounterLong[numEnumEntries]; - this.containerOpsLatency = new MutableRate[numEnumEntries]; + this.containerOpsLatency = new PerformanceMetrics[numEnumEntries]; for (int i = 0; i < numEnumEntries; i++) { pendingOpsArray[i] = registry.newCounter( "numPending" + ContainerProtos.Type.forNumber(i + 1), @@ -66,11 +75,11 @@ public void init() { .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1), "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops", (long) 0); - - containerOpsLatency[i] = registry.newRate( - ContainerProtos.Type.forNumber(i + 1) + "Latency", - "latency of " + ContainerProtos.Type.forNumber(i + 1) - + " ops"); + containerOpsLatency[i] = + PerformanceMetricsInitializer.getMetrics(registry, + ContainerProtos.Type.forNumber(i + 1) + "Latency", + "latency of " + ContainerProtos.Type.forNumber(i + 1), + "Ops", "Time", intervals); } } @@ -129,4 +138,21 @@ public void unRegister() { MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); } + + @Override + public void getMetrics(MetricsCollector collector, boolean b) { + MetricsRecordBuilder recordBuilder = collector.addRecord(SOURCE_NAME); + + pendingOps.snapshot(recordBuilder, true); + totalOps.snapshot(recordBuilder, true); + ecReconstructionTotal.snapshot(recordBuilder, true); + ecReconstructionFailsTotal.snapshot(recordBuilder, true); + + int numEnumEntries = ContainerProtos.Type.values().length; + for (int i = 0; i < numEnumEntries; i++) { + pendingOpsArray[i].snapshot(recordBuilder, true); + opsArray[i].snapshot(recordBuilder, true); + containerOpsLatency[i].snapshot(recordBuilder, true); + } + } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index aff0aa966a79..62d1579ef26d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -83,8 +83,8 @@ public static XceiverClientRatis newXceiverClientRatis( org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, ConfigurationSource ozoneConf, ClientTrustManager trustManager) { final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + .get(ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); final GrpcTlsConfig 
tlsConfig = RatisHelper.createTlsClientConfig(new SecurityConfig(ozoneConf), trustManager); @@ -148,8 +148,9 @@ public long updateCommitInfosMap( // been replicating data successfully. } else { stream = commitInfoProtos.stream().map(proto -> commitInfoMap - .computeIfPresent(RatisHelper.toDatanodeId(proto.getServer()), - (address, index) -> proto.getCommitIndex())); + .computeIfPresent(RatisHelper.toDatanodeId(proto.getServer()), + (address, index) -> proto.getCommitIndex())) + .filter(Objects::nonNull); } return stream.mapToLong(Long::longValue).min().orElse(0); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java index 0e297ae0051c..27fb62dc3c69 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java @@ -118,7 +118,7 @@ public ClientTrustManager(CACertificateProvider remoteProvider, private void initialize(List caCerts) throws CertificateException { try { - KeyStore ks = KeyStore.getInstance("jks"); + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); ks.load(null); for (X509Certificate cert : caCerts) { @@ -145,7 +145,7 @@ private void initialize(List caCerts) private List loadCerts(CACertificateProvider caCertsProvider) throws CertificateException { try { - LOG.info("Loading certificates for client."); + LOG.debug("Loading certificates for client."); if (caCertsProvider == null) { return remoteProvider.provideCACerts(); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java index 0c5501c7922c..957f761ccbc2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java @@ -73,7 +73,7 @@ SortedMap> getCommitIndexMap() { return commitIndexMap; } - void updateCommitInfoMap(long index, List buffers) { + synchronized void updateCommitInfoMap(long index, List buffers) { commitIndexMap.computeIfAbsent(index, k -> new LinkedList<>()) .addAll(buffers); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index d06b1816dc56..2f20d5ec706d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -21,6 +21,7 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -37,6 +38,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi.Validator; @@ -78,8 +80,8 @@ public class BlockInputStream extends 
BlockExtendedInputStream { private XceiverClientSpi xceiverClient; private boolean initialized = false; // TODO: do we need to change retrypolicy based on exception. - private final RetryPolicy retryPolicy = - HddsClientUtils.createRetryPolicy(3, TimeUnit.SECONDS.toMillis(1)); + private final RetryPolicy retryPolicy; + private int retries; // List of ChunkInputStreams, one for each chunk in the block @@ -113,28 +115,34 @@ public class BlockInputStream extends BlockExtendedInputStream { private final Function refreshFunction; - public BlockInputStream(BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + public BlockInputStream( + BlockLocationInfo blockInfo, + Pipeline pipeline, + Token token, XceiverClientFactory xceiverClientFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { this.blockInfo = blockInfo; this.blockID = blockInfo.getBlockID(); this.length = blockInfo.getLength(); setPipeline(pipeline); tokenRef.set(token); - this.verifyChecksum = verifyChecksum; + this.verifyChecksum = config.isChecksumVerify(); this.xceiverClientFactory = xceiverClientFactory; this.refreshFunction = refreshFunction; + this.retryPolicy = + HddsClientUtils.createRetryPolicy(config.getMaxReadRetryCount(), + TimeUnit.SECONDS.toMillis(config.getReadRetryInterval())); } // only for unit tests public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, Token token, - boolean verifyChecksum, - XceiverClientFactory xceiverClientFactory) { + XceiverClientFactory xceiverClientFactory, + OzoneClientConfig config + ) { this(new BlockLocationInfo(new BlockLocationInfo.Builder().setBlockID(blockId).setLength(blockLen)), - pipeline, token, verifyChecksum, - xceiverClientFactory, null); + pipeline, token, xceiverClientFactory, null, config); } /** @@ -217,18 +225,25 @@ private boolean isConnectivityIssue(IOException ex) { } private void refreshBlockInfo(IOException cause) throws IOException { - LOG.info("Unable to read information for block {} from pipeline {}: {}", + LOG.info("Attempting to update pipeline and block token for block {} from pipeline {}: {}", blockID, pipelineRef.get().getId(), cause.getMessage()); if (refreshFunction != null) { LOG.debug("Re-fetching pipeline and block token for block {}", blockID); BlockLocationInfo blockLocationInfo = refreshFunction.apply(blockID); if (blockLocationInfo == null) { - LOG.debug("No new block location info for block {}", blockID); + LOG.warn("No new block location info for block {}", blockID); } else { - LOG.debug("New pipeline for block {}: {}", blockID, - blockLocationInfo.getPipeline()); setPipeline(blockLocationInfo.getPipeline()); + LOG.info("New pipeline for block {}: {}", blockID, + blockLocationInfo.getPipeline()); + tokenRef.set(blockLocationInfo.getToken()); + if (blockLocationInfo.getToken() != null) { + OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(); + tokenId.readFromByteArray(tokenRef.get().getIdentifier()); + LOG.info("A new token is added for block {}. 
Expiry: {}", + blockID, Instant.ofEpochMilli(tokenId.getExpiryDate())); + } } } else { throw cause; @@ -302,10 +317,18 @@ private static void validate(ContainerCommandResponseProto response) throw new IllegalArgumentException("Not GetBlock: response=" + response); } final GetBlockResponseProto b = response.getGetBlock(); + final long blockLength = b.getBlockData().getSize(); final List chunks = b.getBlockData().getChunksList(); for (int i = 0; i < chunks.size(); i++) { final ChunkInfo c = chunks.get(i); - if (c.getLen() <= 0) { + // HDDS-10682 caused an empty chunk to get written to the end of some EC blocks. Due to this + // validation, these blocks will not be readable. In the EC case, the empty chunk is always + // the last chunk and the offset is the block length. We can safely ignore this case and not fail. + if (c.getLen() <= 0 && i == chunks.size() - 1 && c.getOffset() == blockLength) { + DatanodeBlockID blockID = b.getBlockData().getBlockID(); + LOG.warn("The last chunk is empty for container/block {}/{} with an offset of the block length. " + + "Likely due to HDDS-10682. This is safe to ignore.", blockID.getContainerID(), blockID.getLocalID()); + } else if (c.getLen() <= 0) { throw new IOException("Failed to get chunkInfo[" + i + "]: len == " + c.getLen()); } @@ -574,7 +597,20 @@ private boolean shouldRetryRead(IOException cause) throws IOException { } catch (Exception e) { throw new IOException(e); } - return retryAction.action == RetryPolicy.RetryAction.RetryDecision.RETRY; + if (retryAction.action == RetryPolicy.RetryAction.RetryDecision.RETRY) { + if (retryAction.delayMillis > 0) { + try { + LOG.debug("Retry read after {}ms", retryAction.delayMillis); + Thread.sleep(retryAction.delayMillis); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + String msg = "Interrupted: action=" + retryAction.action + ", retry policy=" + retryPolicy; + throw new IOException(msg, e); + } + } + return true; + } + return false; } private void handleReadError(IOException cause) throws IOException { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index a6cd98e48ad9..c1f92c83373c 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -27,9 +27,9 @@ import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -52,9 +52,12 @@ import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.DirectBufferPool; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; + +import static org.apache.hadoop.hdds.DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync; import 
org.apache.ratis.thirdparty.com.google.protobuf.ByteString; @@ -102,6 +105,7 @@ public class BlockOutputStream extends OutputStream { private int chunkIndex; private final AtomicLong chunkOffset = new AtomicLong(); private final BufferPool bufferPool; + private static final DirectBufferPool DIRECT_BUFFER_POOL = new DirectBufferPool(); // The IOException will be set by response handling thread in case there is an // exception received in the response. If the exception is set, the next // request will fail upfront. @@ -140,6 +144,7 @@ public class BlockOutputStream extends OutputStream { private int replicationIndex; private Pipeline pipeline; private final ContainerClientMetrics clientMetrics; + private boolean allowPutBlockPiggybacking; /** * Creates a new BlockOutputStream. @@ -157,7 +162,8 @@ public BlockOutputStream( BufferPool bufferPool, OzoneClientConfig config, Token token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier blockOutputStreamResourceProvider ) throws IOException { this.xceiverClientFactory = xceiverClientManager; this.config = config; @@ -179,9 +185,10 @@ public BlockOutputStream( // tell DataNode I will send incremental chunk list if (config.getIncrementalChunkList()) { this.containerBlockData.addMetadata(INCREMENTAL_CHUNK_LIST_KV); - this.lastChunkBuffer = - ByteBuffer.allocate(config.getStreamBufferSize()); + this.lastChunkBuffer = DIRECT_BUFFER_POOL.getBuffer(config.getStreamBufferSize()); this.lastChunkOffset = 0; + } else { + this.lastChunkBuffer = null; } this.xceiverClient = xceiverClientManager.acquireClient(pipeline); this.bufferPool = bufferPool; @@ -199,8 +206,7 @@ public BlockOutputStream( (long) flushPeriod * streamBufferArgs.getStreamBufferSize() == streamBufferArgs .getStreamBufferFlushSize()); - // A single thread executor handle the responses of async requests - responseExecutor = Executors.newSingleThreadExecutor(); + this.responseExecutor = blockOutputStreamResourceProvider.get(); bufferList = null; totalDataFlushedLength = 0; writtenDataLength = 0; @@ -211,6 +217,20 @@ public BlockOutputStream( this.clientMetrics = clientMetrics; this.pipeline = pipeline; this.streamBufferArgs = streamBufferArgs; + this.allowPutBlockPiggybacking = config.getEnablePutblockPiggybacking() && + allDataNodesSupportPiggybacking(); + } + + private boolean allDataNodesSupportPiggybacking() { + // return true only if all DataNodes in the pipeline are on a version + // that supports PutBlock piggybacking. 
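Illustrative note (not part of the patch; the loop body of allDataNodesSupportPiggybacking continues right after this aside): piggybacking is a capability gate, enabled only when every replica in the pipeline reports a datanode version at or above COMBINED_PUTBLOCK_WRITECHUNK_RPC, so a mixed-version pipeline silently falls back to separate WriteChunk and PutBlock calls. A tiny sketch of the same gate, with the version numbers invented for illustration.

import java.util.Arrays;
import java.util.List;

public final class PiggybackGateSketch {

  // Hypothetical protocol version standing in for COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue().
  private static final int COMBINED_RPC_MIN_VERSION = 3;

  // Mirrors the gate: every replica must be new enough, otherwise fall back to separate RPCs.
  static boolean allNodesSupportPiggybacking(List<Integer> nodeVersions) {
    return nodeVersions.stream().allMatch(v -> v >= COMBINED_RPC_MIN_VERSION);
  }

  public static void main(String[] args) {
    System.out.println(allNodesSupportPiggybacking(Arrays.asList(3, 4, 3))); // true
    System.out.println(allNodesSupportPiggybacking(Arrays.asList(3, 2, 4))); // false: one old replica disables it
  }
}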
+ for (DatanodeDetails dn : pipeline.getNodes()) { + if (dn.getCurrentVersion() < + COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue()) { + return false; + } + } + return true; } void refreshCurrentBuffer() { @@ -499,22 +519,8 @@ ContainerCommandResponseProto> executePutBlock(boolean close, } // if the ioException is not set, putBlock is successful if (getIoException() == null && !force) { - BlockID responseBlockID = BlockID.getFromProtobuf( - e.getPutBlock().getCommittedBlockLength().getBlockID()); - Preconditions.checkState(blockID.get().getContainerBlockID() - .equals(responseBlockID.getContainerBlockID())); - // updates the bcsId of the block - blockID.set(responseBlockID); - if (LOG.isDebugEnabled()) { - LOG.debug( - "Adding index " + asyncReply.getLogIndex() + " flushLength " - + flushPos + " numBuffers " + byteBufferList.size() - + " blockID " + blockID + " bufferPool size" + bufferPool - .getSize() + " currentBufferIndex " + bufferPool - .getCurrentBufferIndex()); - } - // for standalone protocol, logIndex will always be 0. - updateCommitInfo(asyncReply, byteBufferList); + handleSuccessfulPutBlock(e.getPutBlock().getCommittedBlockLength(), + asyncReply, flushPos, byteBufferList); } return e; }, responseExecutor).exceptionally(e -> { @@ -551,7 +557,7 @@ public void flush() throws IOException { } } - private void writeChunk(ChunkBuffer buffer) + private void writeChunkCommon(ChunkBuffer buffer) throws IOException { // This data in the buffer will be pushed to datanode and a reference will // be added to the bufferList. Once putBlock gets executed, this list will @@ -562,7 +568,18 @@ private void writeChunk(ChunkBuffer buffer) bufferList = new ArrayList<>(); } bufferList.add(buffer); - writeChunkToContainer(buffer.duplicate(0, buffer.position())); + } + + private void writeChunk(ChunkBuffer buffer) + throws IOException { + writeChunkCommon(buffer); + writeChunkToContainer(buffer.duplicate(0, buffer.position()), false); + } + + private void writeChunkAndPutBlock(ChunkBuffer buffer) + throws IOException { + writeChunkCommon(buffer); + writeChunkToContainer(buffer.duplicate(0, buffer.position()), true); } /** @@ -594,14 +611,23 @@ private void handleFlushInternal(boolean close) if (totalDataFlushedLength < writtenDataLength) { refreshCurrentBuffer(); Preconditions.checkArgument(currentBuffer.position() > 0); - if (currentBuffer.hasRemaining()) { - writeChunk(currentBuffer); - } + // This can be a partially filled chunk. Since we are flushing the buffer // here, we just limit this buffer to the current position. 
So that next // write will happen in new buffer - updateFlushLength(); - executePutBlock(close, false); + if (currentBuffer.hasRemaining()) { + if (allowPutBlockPiggybacking) { + updateFlushLength(); + writeChunkAndPutBlock(currentBuffer); + } else { + writeChunk(currentBuffer); + updateFlushLength(); + executePutBlock(close, false); + } + } else { + updateFlushLength(); + executePutBlock(close, false); + } } else if (close) { // forcing an "empty" putBlock if stream is being closed without new // data since latest flush - we need to send the "EOF" flag @@ -678,11 +704,14 @@ public void cleanup(boolean invalidateClient) { xceiverClient = null; cleanup(); - if (bufferList != null) { + if (bufferList != null) { bufferList.clear(); } bufferList = null; - responseExecutor.shutdown(); + if (lastChunkBuffer != null) { + DIRECT_BUFFER_POOL.returnBuffer(lastChunkBuffer); + lastChunkBuffer = null; + } } /** @@ -714,7 +743,7 @@ public boolean isClosed() { * @return */ CompletableFuture writeChunkToContainer( - ChunkBuffer chunk) throws IOException { + ChunkBuffer chunk, boolean putBlockPiggybacking) throws IOException { int effectiveChunkSize = chunk.remaining(); final long offset = chunkOffset.getAndAdd(effectiveChunkSize); final ByteString data = chunk.toByteString( @@ -727,6 +756,8 @@ CompletableFuture writeChunkToContainer( .setChecksumData(checksumData.getProtoBufMessage()) .build(); + long flushPos = totalDataFlushedLength; + if (LOG.isDebugEnabled()) { LOG.debug("Writing chunk {} length {} at offset {}", chunkInfo.getChunkName(), effectiveChunkSize, offset); @@ -744,42 +775,93 @@ CompletableFuture writeChunkToContainer( + ", previous = " + previous); } + final List byteBufferList; + CompletableFuture + validateFuture = null; try { - XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, - blockID.get(), data, tokenString, replicationIndex); - CompletableFuture - respFuture = asyncReply.getResponse(); - CompletableFuture - validateFuture = respFuture.thenApplyAsync(e -> { - try { - validateResponse(e); - } catch (IOException sce) { - respFuture.completeExceptionally(sce); - } - return e; - }, responseExecutor).exceptionally(e -> { - String msg = "Failed to write chunk " + chunkInfo.getChunkName() + - " into block " + blockID; - LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage()); - CompletionException ce = new CompletionException(msg, e); - setIoException(ce); - throw ce; - }); + BlockData blockData = null; + if (config.getIncrementalChunkList()) { updateBlockDataForWriteChunk(chunk); } else { containerBlockData.addChunks(chunkInfo); } + if (putBlockPiggybacking) { + Preconditions.checkNotNull(bufferList); + byteBufferList = bufferList; + bufferList = null; + Preconditions.checkNotNull(byteBufferList); + + blockData = containerBlockData.build(); + LOG.debug("piggyback chunk list {}", blockData); + + if (config.getIncrementalChunkList()) { + // remove any chunks in the containerBlockData list. + // since they are sent. 
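Illustrative note (not part of the patch; the writeChunkToContainer hunk continues right after this aside): the handleFlushInternal change above introduces three flush cases: piggyback the PutBlock onto the partial WriteChunk when allowed (one round trip), otherwise send the WriteChunk and then a separate PutBlock, and send a bare PutBlock when the buffer has nothing unflushed. A condensed sketch of that decision, with hypothetical stand-ins for the real BlockOutputStream state and helpers.

public final class FlushDecisionSketch {

  // Hypothetical flags standing in for BlockOutputStream state; the real class checks
  // currentBuffer.hasRemaining() and the negotiated allowPutBlockPiggybacking flag.
  private final boolean piggybackingAllowed;
  private final boolean bufferHasRemaining;

  FlushDecisionSketch(boolean piggybackingAllowed, boolean bufferHasRemaining) {
    this.piggybackingAllowed = piggybackingAllowed;
    this.bufferHasRemaining = bufferHasRemaining;
  }

  String flush(boolean close) {
    if (bufferHasRemaining) {
      if (piggybackingAllowed) {
        // One round trip: the partial chunk carries the block metadata with it.
        return "WriteChunk(+PutBlock piggybacked)";
      }
      // Two round trips: WriteChunk first, then a separate PutBlock.
      return "WriteChunk, then PutBlock(close=" + close + ")";
    }
    // Nothing new in the buffer: only the (possibly closing) PutBlock is sent.
    return "PutBlock(close=" + close + ")";
  }

  public static void main(String[] args) {
    System.out.println(new FlushDecisionSketch(true, true).flush(false));
    System.out.println(new FlushDecisionSketch(false, true).flush(false));
    System.out.println(new FlushDecisionSketch(true, false).flush(true));
  }
}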
+ containerBlockData.clearChunks(); + } + } else { + byteBufferList = null; + } + XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, + blockID.get(), data, tokenString, replicationIndex, blockData); + CompletableFuture + respFuture = asyncReply.getResponse(); + validateFuture = respFuture.thenApplyAsync(e -> { + try { + validateResponse(e); + } catch (IOException sce) { + respFuture.completeExceptionally(sce); + } + // if the ioException is not set, putBlock is successful + if (getIoException() == null && putBlockPiggybacking) { + handleSuccessfulPutBlock(e.getWriteChunk().getCommittedBlockLength(), + asyncReply, flushPos, byteBufferList); + } + return e; + }, responseExecutor).exceptionally(e -> { + String msg = "Failed to write chunk " + chunkInfo.getChunkName() + + " into block " + blockID; + LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage()); + CompletionException ce = new CompletionException(msg, e); + setIoException(ce); + throw ce; + }); clientMetrics.recordWriteChunk(pipeline, chunkInfo.getLen()); - return validateFuture; + } catch (IOException | ExecutionException e) { throw new IOException(EXCEPTION_MSG + e.toString(), e); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); handleInterruptedException(ex, false); } - return null; + if (putBlockPiggybacking) { + putFlushFuture(flushPos, validateFuture); + } + return validateFuture; + } + + private void handleSuccessfulPutBlock( + ContainerProtos.GetCommittedBlockLengthResponseProto e, + XceiverClientReply asyncReply, long flushPos, + List byteBufferList) { + BlockID responseBlockID = BlockID.getFromProtobuf( + e.getBlockID()); + Preconditions.checkState(blockID.get().getContainerBlockID() + .equals(responseBlockID.getContainerBlockID())); + // updates the bcsId of the block + blockID.set(responseBlockID); + if (LOG.isDebugEnabled()) { + LOG.debug( + "Adding index " + asyncReply.getLogIndex() + " flushLength " + + flushPos + " numBuffers " + byteBufferList.size() + + " blockID " + blockID + " bufferPool size" + bufferPool + .getSize() + " currentBufferIndex " + bufferPool + .getCurrentBufferIndex()); + } + // for standalone protocol, logIndex will always be 0. + updateCommitInfo(asyncReply, byteBufferList); } /** @@ -855,7 +937,11 @@ private void appendLastChunkBuffer(ChunkBuffer chunkBuffer, int offset, try { LOG.debug("put into last chunk buffer start = {} len = {}", copyStart, copyLen); - lastChunkBuffer.put(bb.array(), copyStart, copyLen); + int origPos = bb.position(); + int origLimit = bb.limit(); + bb.position(copyStart).limit(copyStart + copyLen); + lastChunkBuffer.put(bb); + bb.position(origPos).limit(origLimit); } catch (BufferOverflowException e) { LOG.error("appending from " + copyStart + " for len=" + copyLen + ". 
lastChunkBuffer remaining=" + lastChunkBuffer.remaining() + diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java index 274b977ef623..b68b56f67c72 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java @@ -111,6 +111,7 @@ void releaseBuffer(ChunkBuffer chunkBuffer) { } public void clearBufferPool() { + bufferList.forEach(ChunkBuffer::close); bufferList.clear(); currentBufferIndex = -1; } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java index 3c7f8a2360c8..aa339409eceb 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java @@ -24,6 +24,7 @@ */ package org.apache.hadoop.hdds.scm.storage; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.ozone.common.ChunkBuffer; @@ -32,6 +33,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; /** * This class executes watchForCommit on ratis pipeline and releases @@ -42,8 +44,8 @@ class CommitWatcher extends AbstractCommitWatcher { private final BufferPool bufferPool; // future Map to hold up all putBlock futures - private final ConcurrentMap> futureMap = new ConcurrentHashMap<>(); + private final ConcurrentMap> + futureMap = new ConcurrentHashMap<>(); CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient) { super(xceiverClient); @@ -67,11 +69,24 @@ void releaseBuffers(long index) { + totalLength + ": existing = " + futureMap.keySet()); } - ConcurrentMap> getFutureMap() { + @VisibleForTesting + ConcurrentMap> getFutureMap() { return futureMap; } + public void putFlushFuture(long flushPos, CompletableFuture flushFuture) { + futureMap.compute(flushPos, + (key, previous) -> previous == null ? 
flushFuture : + previous.thenCombine(flushFuture, (prev, curr) -> curr)); + } + + + public void waitOnFlushFutures() throws InterruptedException, ExecutionException { + // wait for all the transactions to complete + CompletableFuture.allOf(futureMap.values().toArray( + new CompletableFuture[0])).get(); + } + @Override public void cleanup() { super.cleanup(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java index 0abc2274bf08..a57ae74f991b 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java @@ -44,6 +44,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import java.util.stream.Collectors; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync; @@ -75,10 +77,11 @@ public ECBlockOutputStream( BufferPool bufferPool, OzoneClientConfig config, Token token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier executorServiceSupplier ) throws IOException { super(blockID, xceiverClientManager, - pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs); + pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs, executorServiceSupplier); // In EC stream, there will be only one node in pipeline. this.datanodeDetails = pipeline.getClosestNode(); } @@ -86,13 +89,14 @@ public ECBlockOutputStream( @Override public void write(byte[] b, int off, int len) throws IOException { this.currentChunkRspFuture = - writeChunkToContainer(ChunkBuffer.wrap(ByteBuffer.wrap(b, off, len))); + writeChunkToContainer( + ChunkBuffer.wrap(ByteBuffer.wrap(b, off, len)), false); updateWrittenDataLength(len); } public CompletableFuture write( ByteBuffer buff) throws IOException { - return writeChunkToContainer(ChunkBuffer.wrap(buff)); + return writeChunkToContainer(ChunkBuffer.wrap(buff), false); } public CompletableFuture executePutBlock(boolean close, } BlockData checksumBlockData = null; + BlockID blockID = null; //Reverse Traversal as all parity will have checksumBytes for (int i = blockData.length - 1; i >= 0; i--) { BlockData bd = blockData[i]; if (bd == null) { continue; } + if (blockID == null) { + // store the BlockID for logging + blockID = bd.getBlockID(); + } List chunks = bd.getChunks(); - if (chunks != null && chunks.size() > 0 && chunks.get(0) - .hasStripeChecksum()) { - checksumBlockData = bd; - break; + if (chunks != null && chunks.size() > 0) { + if (chunks.get(0).hasStripeChecksum()) { + checksumBlockData = bd; + break; + } else { + ChunkInfo chunk = chunks.get(0); + LOG.debug("The first chunk in block with index {} does not have stripeChecksum. BlockID: {}, Block " + + "size: {}. 
Chunk length: {}, Chunk offset: {}, hasChecksumData: {}, chunks size: {}.", i, + bd.getBlockID(), bd.getSize(), chunk.getLen(), chunk.getOffset(), chunk.hasChecksumData(), chunks.size()); + } } } @@ -155,9 +170,8 @@ ContainerCommandResponseProto> executePutBlock(boolean close, getContainerBlockData().clearChunks(); getContainerBlockData().addAllChunks(newChunkList); } else { - throw new IOException("None of the block data have checksum " + - "which means " + parity + "(parity)+1 blocks are " + - "not present"); + LOG.warn("Could not find checksum data in any index for blockData with BlockID {}, length {} and " + + "blockGroupLength {}.", blockID, blockData.length, blockGroupLength); } return executePutBlock(close, force, blockGroupLength); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java index b52fc2af9178..b587b1d13171 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java @@ -37,6 +37,8 @@ import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; /** * An {@link OutputStream} used by the REST service in combination with the @@ -65,8 +67,8 @@ public class RatisBlockOutputStream extends BlockOutputStream /** * Creates a new BlockOutputStream. * - * @param blockID block ID - * @param bufferPool pool of buffers + * @param blockID block ID + * @param bufferPool pool of buffers */ @SuppressWarnings("checkstyle:ParameterNumber") public RatisBlockOutputStream( @@ -76,10 +78,11 @@ public RatisBlockOutputStream( BufferPool bufferPool, OzoneClientConfig config, Token token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier blockOutputStreamResourceProvider ) throws IOException { super(blockID, xceiverClientManager, pipeline, - bufferPool, config, token, clientMetrics, streamBufferArgs); + bufferPool, config, token, clientMetrics, streamBufferArgs, blockOutputStreamResourceProvider); this.commitWatcher = new CommitWatcher(bufferPool, getXceiverClient()); } @@ -110,16 +113,13 @@ void updateCommitInfo(XceiverClientReply reply, List buffers) { } @Override - void putFlushFuture(long flushPos, - CompletableFuture flushFuture) { - commitWatcher.getFutureMap().put(flushPos, flushFuture); + void putFlushFuture(long flushPos, CompletableFuture flushFuture) { + commitWatcher.putFlushFuture(flushPos, flushFuture); } @Override void waitOnFlushFutures() throws InterruptedException, ExecutionException { - // wait for all the transactions to complete - CompletableFuture.allOf(commitWatcher.getFutureMap().values().toArray( - new CompletableFuture[0])).get(); + commitWatcher.waitOnFlushFutures(); } @Override diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java index bd100214ae48..6f8a744f762d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java @@ -19,6 
+19,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -48,8 +49,9 @@ public interface BlockInputStreamFactory { */ BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, - Function refreshFunction); + Function refreshFunction, + OzoneClientConfig config); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java index b9233f42d555..7edc498cf676 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -76,16 +77,18 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool, */ public BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { if (repConfig.getReplicationType().equals(HddsProtos.ReplicationType.EC)) { return new ECBlockInputStreamProxy((ECReplicationConfig)repConfig, - blockInfo, verifyChecksum, xceiverFactory, refreshFunction, - ecBlockStreamFactory); + blockInfo, xceiverFactory, refreshFunction, + ecBlockStreamFactory, config); } else { - return new BlockInputStream(blockInfo, pipeline, token, verifyChecksum, xceiverFactory, - refreshFunction); + return new BlockInputStream(blockInfo, + pipeline, token, xceiverFactory, refreshFunction, + config); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java index 174fd8c75f6d..19ce31c52932 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java @@ -39,6 +39,11 @@ public void write(@Nonnull byte[] byteArray) throws IOException { write(ByteBuffer.wrap(byteArray)); } + @Override + public void write(@Nonnull byte[] byteArray, int off, int len) throws IOException { + write(ByteBuffer.wrap(byteArray), off, len); + } + @Override public void write(int b) throws IOException { write(new byte[]{(byte) b}); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index e85bf27d530f..8dc07f129b9c 100644 --- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -60,7 +61,6 @@ public class ECBlockInputStream extends BlockExtendedInputStream { private final int ecChunkSize; private final long stripeSize; private final BlockInputStreamFactory streamFactory; - private final boolean verifyChecksum; private final XceiverClientFactory xceiverClientFactory; private final Function refreshFunction; private final BlockLocationInfo blockInfo; @@ -75,7 +75,7 @@ public class ECBlockInputStream extends BlockExtendedInputStream { private long position = 0; private boolean closed = false; private boolean seeked = false; - + private OzoneClientConfig config; protected ECReplicationConfig getRepConfig() { return repConfig; } @@ -108,13 +108,13 @@ protected int availableDataLocations(int expectedLocations) { } public ECBlockInputStream(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, - BlockInputStreamFactory streamFactory) { + BlockInputStreamFactory streamFactory, + OzoneClientConfig config) { this.repConfig = repConfig; this.ecChunkSize = repConfig.getEcChunkSize(); - this.verifyChecksum = verifyChecksum; this.blockInfo = blockInfo; this.streamFactory = streamFactory; this.xceiverClientFactory = xceiverClientFactory; @@ -123,6 +123,7 @@ public ECBlockInputStream(ECReplicationConfig repConfig, this.dataLocations = new DatanodeDetails[repConfig.getRequiredNodes()]; this.blockStreams = new BlockExtendedInputStream[repConfig.getRequiredNodes()]; + this.config = config; this.stripeSize = (long)ecChunkSize * repConfig.getData(); setBlockLocations(this.blockInfo.getPipeline()); @@ -191,8 +192,9 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) { StandaloneReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE), blkInfo, pipeline, - blockInfo.getToken(), verifyChecksum, xceiverClientFactory, - ecPipelineRefreshFunction(locationIndex + 1, refreshFunction)); + blockInfo.getToken(), xceiverClientFactory, + ecPipelineRefreshFunction(locationIndex + 1, refreshFunction), + config); blockStreams[locationIndex] = stream; LOG.debug("{}: created stream [{}]: {}", this, locationIndex, stream); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java index 0e2ef22c1e94..66e7a31337a6 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import 
org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -51,7 +52,8 @@ public interface ECBlockInputStreamFactory { */ BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverFactory, - Function refreshFunction); + Function refreshFunction, + OzoneClientConfig config); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java index 36b6539ea817..01d0b0a7b7e8 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -74,16 +75,17 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory, */ public BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { if (missingLocations) { // We create the reconstruction reader ECBlockReconstructedStripeInputStream sis = new ECBlockReconstructedStripeInputStream( - (ECReplicationConfig)repConfig, blockInfo, verifyChecksum, + (ECReplicationConfig)repConfig, blockInfo, xceiverFactory, refreshFunction, inputStreamFactory, - byteBufferPool, ecReconstructExecutorSupplier.get()); + byteBufferPool, ecReconstructExecutorSupplier.get(), config); if (failedLocations != null) { sis.addFailedDatanodes(failedLocations); } @@ -92,7 +94,8 @@ public BlockExtendedInputStream create(boolean missingLocations, } else { // Otherwise create the more efficient non-reconstruction reader return new ECBlockInputStream((ECReplicationConfig)repConfig, blockInfo, - verifyChecksum, xceiverFactory, refreshFunction, inputStreamFactory); + xceiverFactory, refreshFunction, inputStreamFactory, + config); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java index 973561616f7b..68a0337cef1d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import 
org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -49,7 +50,6 @@ public class ECBlockInputStreamProxy extends BlockExtendedInputStream { LoggerFactory.getLogger(ECBlockInputStreamProxy.class); private final ECReplicationConfig repConfig; - private final boolean verifyChecksum; private final XceiverClientFactory xceiverClientFactory; private final Function refreshFunction; private final BlockLocationInfo blockInfo; @@ -59,6 +59,7 @@ public class ECBlockInputStreamProxy extends BlockExtendedInputStream { private boolean reconstructionReader = false; private List failedLocations = new ArrayList<>(); private boolean closed = false; + private OzoneClientConfig config; /** * Given the ECReplicationConfig and the block length, calculate how many @@ -97,16 +98,17 @@ public static int availableDataLocations(Pipeline pipeline, } public ECBlockInputStreamProxy(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, - ECBlockInputStreamFactory streamFactory) { + ECBlockInputStreamFactory streamFactory, + OzoneClientConfig config) { this.repConfig = repConfig; - this.verifyChecksum = verifyChecksum; this.blockInfo = blockInfo; this.ecBlockInputStreamFactory = streamFactory; this.xceiverClientFactory = xceiverClientFactory; this.refreshFunction = refreshFunction; + this.config = config; setReaderType(); createBlockReader(); @@ -124,8 +126,8 @@ private void createBlockReader() { .incECReconstructionTotal(); } blockReader = ecBlockInputStreamFactory.create(reconstructionReader, - failedLocations, repConfig, blockInfo, verifyChecksum, - xceiverClientFactory, refreshFunction); + failedLocations, repConfig, blockInfo, + xceiverClientFactory, refreshFunction, config); } @Override diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java index 142825cb1206..31f94e0acad6 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -152,14 +153,15 @@ public class ECBlockReconstructedStripeInputStream extends ECBlockInputStream { @SuppressWarnings("checkstyle:ParameterNumber") public ECBlockReconstructedStripeInputStream(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, BlockInputStreamFactory streamFactory, ByteBufferPool byteBufferPool, - ExecutorService ecReconstructExecutor) { - super(repConfig, blockInfo, verifyChecksum, xceiverClientFactory, - refreshFunction, streamFactory); + ExecutorService ecReconstructExecutor, + OzoneClientConfig config) { + super(repConfig, blockInfo, xceiverClientFactory, + refreshFunction, streamFactory, 
config); this.byteBufferPool = byteBufferPool; this.executor = ecReconstructExecutor; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java new file mode 100644 index 000000000000..0dd29cb50a45 --- /dev/null +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.junit.jupiter.api.Test; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; +import static org.junit.jupiter.api.Assertions.assertEquals; + +class TestOzoneClientConfig { + + @Test + void missingSizeSuffix() { + final int bytes = 1024; + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt("ozone.client.bytes.per.checksum", bytes); + + OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class); + + assertEquals(OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE, subject.getBytesPerChecksum()); + } +} diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java index ca3199d8acfb..2987a9b6136f 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; @@ -45,14 +46,14 @@ class DummyBlockInputStream extends BlockInputStream { long blockLen, Pipeline pipeline, Token token, - boolean verifyChecksum, XceiverClientFactory xceiverClientManager, Function refreshFunction, List chunkList, - Map chunks) { + Map chunks, + OzoneClientConfig config) { super(new BlockLocationInfo(new BlockLocationInfo.Builder().setBlockID(blockId).setLength(blockLen)), - pipeline, token, verifyChecksum, - xceiverClientManager, refreshFunction); + pipeline, token, + xceiverClientManager, refreshFunction, config); this.chunkDataMap = chunks; this.chunks = chunkList; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java index d66c76dcddcb..172e62887bdd 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; @@ -52,12 +53,12 @@ final class DummyBlockInputStreamWithRetry long blockLen, Pipeline pipeline, Token 
token, - boolean verifyChecksum, XceiverClientFactory xceiverClientManager, List chunkList, Map chunkMap, - AtomicBoolean isRerfreshed, IOException ioException) { - super(blockId, blockLen, pipeline, token, verifyChecksum, + AtomicBoolean isRerfreshed, IOException ioException, + OzoneClientConfig config) { + super(blockId, blockLen, pipeline, token, xceiverClientManager, blockID -> { isRerfreshed.set(true); try { @@ -69,7 +70,7 @@ final class DummyBlockInputStreamWithRetry throw new RuntimeException(e); } - }, chunkList, chunkMap); + }, chunkList, chunkMap, config); this.ioException = ioException; } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index 4db569b7c07a..0012d691f92d 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -22,9 +22,11 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -34,6 +36,7 @@ import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.OzoneChecksumException; +import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.io.grpc.Status; import org.apache.ratis.thirdparty.io.grpc.StatusException; import org.junit.jupiter.api.BeforeEach; @@ -42,6 +45,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.mockito.stubbing.OngoingStubbing; +import org.slf4j.event.Level; import java.io.EOFException; import java.io.IOException; @@ -58,6 +62,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; import static org.apache.hadoop.hdds.scm.storage.TestChunkInputStream.generateRandomData; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -86,6 +91,8 @@ public class TestBlockInputStream { private Function refreshFunction; + private OzoneConfiguration conf = new OzoneConfiguration(); + @BeforeEach @SuppressWarnings("unchecked") public void setup() throws Exception { @@ -93,10 +100,12 @@ public void setup() throws Exception { BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE); createChunkList(5); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); Pipeline pipeline = MockPipeline.createSingleNodePipeline(); blockStream = new DummyBlockInputStream(blockID, blockSize, pipeline, null, - false, null, refreshFunction, chunks, chunkDataMap); + null, 
refreshFunction, chunks, chunkDataMap, clientConfig); } /** @@ -257,18 +266,25 @@ public void testSeekAndRead() throws Exception { @Test public void testRefreshPipelineFunction() throws Exception { + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(BlockInputStream.LOG); + GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG); BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); AtomicBoolean isRefreshed = new AtomicBoolean(); createChunkList(5); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); try (BlockInputStream blockInputStreamWithRetry = new DummyBlockInputStreamWithRetry(blockID, blockSize, MockPipeline.createSingleNodePipeline(), null, - false, null, chunks, chunkDataMap, isRefreshed, null)) { + null, chunks, chunkDataMap, isRefreshed, null, + clientConfig)) { assertFalse(isRefreshed.get()); seekAndVerify(50); byte[] b = new byte[200]; blockInputStreamWithRetry.read(b, 0, 200); + assertThat(logCapturer.getOutput()).contains("Retry read after"); assertTrue(isRefreshed.get()); } } @@ -348,8 +364,10 @@ private static ChunkInputStream throwingChunkInputStream(IOException ex, private BlockInputStream createSubject(BlockID blockID, Pipeline pipeline, ChunkInputStream stream) { - return new DummyBlockInputStream(blockID, blockSize, pipeline, null, false, - null, refreshFunction, chunks, null) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); + return new DummyBlockInputStream(blockID, blockSize, pipeline, null, + null, refreshFunction, chunks, null, clientConfig) { @Override protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; @@ -401,9 +419,12 @@ public void testRefreshOnReadFailureAfterUnbuffer(IOException ex) .thenReturn(blockLocationInfo); when(blockLocationInfo.getPipeline()).thenReturn(newPipeline); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); BlockInputStream subject = new BlockInputStream( new BlockLocationInfo(new BlockLocationInfo.Builder().setBlockID(blockID).setLength(blockSize)), - pipeline, null, false, clientFactory, refreshFunction) { + pipeline, null, clientFactory, refreshFunction, + clientConfig) { @Override protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java index 9b061f5392d3..bf4830c6fcb5 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java @@ -23,9 +23,13 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; @@ -42,11 +46,17 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import static java.util.concurrent.Executors.newFixedThreadPool; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -82,6 +92,66 @@ void test(final int writeSize) throws IOException { } } + /** + * Tests an EC offline reconstruction scenario in which none of the ChunkInfo in an EC stripe have stripeChecksum. + * Such ChunkInfo will exist for any EC data that was written in a version in which the ChunkInfo protobuf message did + * not have the stripeChecksum field. Here, we assert that executePutBlock during reconstruction does not throw an + * exception because of missing stripeChecksum. This essentially tests compatibility between an Ozone version that + * did not have stripeChecksum and a version that has stripeChecksum. + */ + @Test + public void testMissingStripeChecksumDoesNotMakeExecutePutBlockFailDuringECReconstruction() throws IOException { + // setup some parameters required for creating ECBlockOutputStream + OzoneClientConfig config = new OzoneClientConfig(); + ECReplicationConfig replicationConfig = new ECReplicationConfig(3, 2); + BlockID blockID = new BlockID(1, 1); + DatanodeDetails datanodeDetails = MockDatanodeDetails.randomDatanodeDetails(); + Pipeline pipeline = Pipeline.newBuilder() + .setId(PipelineID.valueOf(datanodeDetails.getUuid())) + .setReplicationConfig(replicationConfig) + .setNodes(ImmutableList.of(datanodeDetails)) + .setState(Pipeline.PipelineState.CLOSED) + // we'll executePutBlock for the parity index 5 because stripeChecksum is written to either the first or the + // parity indexes + .setReplicaIndexes(ImmutableMap.of(datanodeDetails, 5)).build(); + + BlockLocationInfo locationInfo = new BlockLocationInfo.Builder() + .setBlockID(blockID) + .setOffset(1) + .setLength(10) + .setPipeline(pipeline).build(); + + /* + The array of BlockData contains metadata about blocks and their chunks, and is read in executePutBlock. In + this test, we deliberately don't write stripeChecksum to any chunk. The expectation is that executePutBlock + should not throw an exception because of missing stripeChecksum. + */ + BlockData[] blockData = createBlockDataWithoutStripeChecksum(blockID, replicationConfig); + try (ECBlockOutputStream ecBlockOutputStream = createECBlockOutputStream(config, replicationConfig, blockID, + pipeline)) { + Assertions.assertDoesNotThrow(() -> ecBlockOutputStream.executePutBlock(true, true, locationInfo.getLength(), + blockData)); + } + } + + /** + * Creates a BlockData array with {@link ECReplicationConfig#getRequiredNodes()} number of elements. 
+ */ + private BlockData[] createBlockDataWithoutStripeChecksum(BlockID blockID, ECReplicationConfig replicationConfig) { + int requiredNodes = replicationConfig.getRequiredNodes(); + BlockData[] blockDataArray = new BlockData[requiredNodes]; + + // add just one ChunkInfo to each BlockData. + for (int i = 0; i < requiredNodes; i++) { + BlockData data = new BlockData(blockID); + // create a ChunkInfo with no stripeChecksum + ChunkInfo chunkInfo = new ChunkInfo("abc", 0, 10); + data.addChunk(chunkInfo.getProtoBufMessage()); + blockDataArray[i] = data; + } + return blockDataArray; + } + private BlockOutputStream createBlockOutputStream(BufferPool bufferPool) throws IOException { @@ -108,7 +178,23 @@ private BlockOutputStream createBlockOutputStream(BufferPool bufferPool) bufferPool, config, null, - ContainerClientMetrics.acquire(), streamBufferArgs); + ContainerClientMetrics.acquire(), + streamBufferArgs, + () -> newFixedThreadPool(10)); + } + + private ECBlockOutputStream createECBlockOutputStream(OzoneClientConfig clientConfig, + ECReplicationConfig repConfig, BlockID blockID, Pipeline pipeline) throws IOException { + final XceiverClientManager xcm = mock(XceiverClientManager.class); + when(xcm.acquireClient(any())) + .thenReturn(new MockXceiverClientSpi(pipeline)); + + ContainerClientMetrics clientMetrics = ContainerClientMetrics.acquire(); + StreamBufferArgs streamBufferArgs = + StreamBufferArgs.getDefaultStreamBufferArgs(repConfig, clientConfig); + + return new ECBlockOutputStream(blockID, xcm, pipeline, BufferPool.empty(), clientConfig, null, + clientMetrics, streamBufferArgs, () -> newFixedThreadPool(2)); } /** diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java index 41bf46a8ea20..049037bc4dce 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -258,9 +259,10 @@ public synchronized void setFailIndexes(Integer... 
fail) { public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { int repInd = currentPipeline.getReplicaIndex(pipeline.getNodes().get(0)); TestBlockInputStream stream = new TestBlockInputStream( diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java index cf3f4f13ef94..623f7a4f86f1 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java @@ -21,9 +21,11 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -43,6 +45,8 @@ */ public class TestBlockInputStreamFactoryImpl { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testNonECGivesBlockInputStream() { BlockInputStreamFactory factory = new BlockInputStreamFactoryImpl(); @@ -52,9 +56,12 @@ public void testNonECGivesBlockInputStream() { BlockLocationInfo blockInfo = createKeyLocationInfo(repConfig, 3, 1024 * 1024 * 10); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), - blockInfo.getToken(), true, null, null); + blockInfo.getToken(), null, null, + clientConfig); assertInstanceOf(BlockInputStream.class, stream); assertEquals(stream.getBlockID(), blockInfo.getBlockID()); assertEquals(stream.getLength(), blockInfo.getLength()); @@ -69,9 +76,12 @@ public void testECGivesECBlockInputStream() { BlockLocationInfo blockInfo = createKeyLocationInfo(repConfig, 5, 1024 * 1024 * 10); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), - blockInfo.getToken(), true, null, null); + blockInfo.getToken(), null, null, + clientConfig); assertInstanceOf(ECBlockInputStreamProxy.class, stream); assertEquals(stream.getBlockID(), blockInfo.getBlockID()); assertEquals(stream.getLength(), blockInfo.getLength()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java index bd34e7546c12..60974b35a95c 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java @@ -20,9 
+20,11 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -59,6 +61,7 @@ public class TestECBlockInputStream { private ECReplicationConfig repConfig; private TestBlockInputStreamFactory streamFactory; + private OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() { @@ -72,15 +75,19 @@ public void testSufficientLocations() { // EC-3-2, 5MB block, so all 3 data locations are needed BlockLocationInfo keyInfo = ECStreamTestUtil .createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } // EC-3-2, very large block, so all 3 data locations are needed keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5000 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } @@ -90,7 +97,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 1); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB - 1, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } @@ -100,7 +108,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 1); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertFalse(ecb.hasSufficientLocations()); } @@ -112,7 +121,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 5); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertFalse(ecb.hasSufficientLocations()); } } @@ -124,8 +134,11 @@ public void testCorrectBlockSizePassedToBlockStreamLessThanCell() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB - 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new 
ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); // We expect only 1 block stream and it should have a length passed of // ONEMB - 100. @@ -141,8 +154,11 @@ public void testCorrectBlockSizePassedToBlockStreamTwoCells() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -157,8 +173,11 @@ public void testCorrectBlockSizePassedToBlockStreamThreeCells() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 2 * ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -174,8 +193,11 @@ public void testCorrectBlockSizePassedToBlockStreamThreeFullAndPartialStripe() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 10 * ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(4 * ONEMB, streams.get(0).getLength()); @@ -191,8 +213,11 @@ public void testCorrectBlockSizePassedToBlockStreamSingleFullCell() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -206,8 +231,11 @@ public void testCorrectBlockSizePassedToBlockStreamSeveralFullCells() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 9 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(3 * ONEMB, streams.get(0).getLength()); @@ -220,8 +248,11 @@ public void testCorrectBlockSizePassedToBlockStreamSeveralFullCells() public void testSimpleRead() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, 
streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -243,8 +274,11 @@ public void testSimpleRead() throws IOException { public void testSimpleReadUnderOneChunk() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 1, ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -262,8 +296,11 @@ public void testSimpleReadUnderOneChunk() throws IOException { public void testReadPastEOF() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 50); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -281,8 +318,11 @@ public void testReadCrossingMultipleECChunkBounds() throws IOException { 100); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { // EC Chunk size is 100 and 3-2. Create a byte buffer to read 3.5 chunks, // so 350 @@ -316,8 +356,11 @@ public void testSeekPastBlockLength() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { assertThrows(EOFException.class, () -> ecb.seek(1000)); } } @@ -328,8 +371,11 @@ public void testSeekToLength() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { // When seek more than the length, should throw EOFException. 
assertThrows(EOFException.class, () -> ecb.seek(101)); } @@ -341,8 +387,11 @@ public void testSeekToLengthZeroLengthBlock() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 0); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.seek(0); assertEquals(0, ecb.getPos()); assertEquals(0, ecb.getRemaining()); @@ -355,8 +404,11 @@ public void testSeekToValidPosition() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.seek(ONEMB - 1); assertEquals(ONEMB - 1, ecb.getPos()); assertEquals(ONEMB * 4 + 1, ecb.getRemaining()); @@ -384,8 +436,11 @@ public void testErrorReadingBlockReportsBadLocation() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { // Read a full stripe to ensure all streams are created in the stream // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); @@ -415,8 +470,11 @@ public void testNoErrorIfSpareLocationToRead() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 8 * ONEMB, datanodes); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { // Read a full stripe to ensure all streams are created in the stream // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); @@ -479,8 +537,11 @@ public void testEcPipelineRefreshFunction() { return blockLocation; }; + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { Pipeline pipeline = ecb.ecPipelineRefreshFunction(3, refreshFunction) .apply(blockID) @@ -513,8 +574,9 @@ public synchronized List getBlockStreams() { public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, - boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + XceiverClientFactory xceiverFactory, + Function refreshFunction, + OzoneClientConfig config) { TestBlockInputStream stream = new TestBlockInputStream( blockInfo.getBlockID(), blockInfo.getLength(), (byte)blockStreams.size()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java 
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java index 97bf71c204ad..ca0b9710a960 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java @@ -20,7 +20,9 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -52,6 +54,7 @@ public class TestECBlockInputStreamProxy { private long randomSeed; private ThreadLocalRandom random = ThreadLocalRandom.current(); private SplittableRandom dataGenerator; + private OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() { @@ -342,8 +345,11 @@ private void resetAndAdvanceDataGenerator(long position) { private ECBlockInputStreamProxy createBISProxy(ECReplicationConfig rConfig, BlockLocationInfo blockInfo) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); return new ECBlockInputStreamProxy( - rConfig, blockInfo, true, null, null, streamFactory); + rConfig, blockInfo, null, null, streamFactory, + clientConfig); } private static class TestECBlockInputStreamFactory @@ -372,8 +378,9 @@ public List getFailedLocations() { public BlockExtendedInputStream create(boolean missingLocations, List failedDatanodes, ReplicationConfig repConfig, BlockLocationInfo blockInfo, - boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + XceiverClientFactory xceiverFactory, + Function refreshFunction, + OzoneClientConfig config) { this.failedLocations = failedDatanodes; ByteBuffer wrappedBuffer = ByteBuffer.wrap(data.array(), 0, data.capacity()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java index 0425f6943a48..6b60bef66af4 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java @@ -19,7 +19,9 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; @@ -54,6 +56,7 @@ public class TestECBlockReconstructedInputStream { private ByteBufferPool bufferPool = new ElasticByteBufferPool(); private ExecutorService ecReconstructExecutor = Executors.newFixedThreadPool(3); + private OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() throws IOException { @@ -74,8 +77,11 @@ private ECBlockReconstructedStripeInputStream createStripeInputStream( 
BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); streamFactory.setCurrentPipeline(keyInfo.getPipeline()); - return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, true, - null, null, streamFactory, bufferPool, ecReconstructExecutor); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, + null, null, streamFactory, bufferPool, ecReconstructExecutor, + clientConfig); } @Test diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index f7a4bb0643ec..e526b12a5142 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -20,8 +20,10 @@ import com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; @@ -73,7 +75,8 @@ public class TestECBlockReconstructedStripeInputStream { private ByteBufferPool bufferPool = new ElasticByteBufferPool(); private ExecutorService ecReconstructExecutor = Executors.newFixedThreadPool(3); - + private OzoneConfiguration conf = new OzoneConfiguration(); + static List> recoveryCases() { // TODO better name List> params = new ArrayList<>(); params.add(emptySet()); // non-recovery @@ -808,8 +811,11 @@ public void testFailedLocationsAreNotRead() throws IOException { private ECBlockReconstructedStripeInputStream createInputStream( BlockLocationInfo keyInfo) { - return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, true, - null, null, streamFactory, bufferPool, ecReconstructExecutor); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, + null, null, streamFactory, bufferPool, ecReconstructExecutor, + clientConfig); } private void addDataStreamsToFactory(ByteBuffer[] data, ByteBuffer[] parity) { diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 20dce15d4d1b..2e0c96ac1d64 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -135,10 +135,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.commons - commons-pool2 - org.bouncycastle bcpkix-jdk18on @@ -181,6 +177,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> grpc-api ${io.grpc.version} compile + + + com.google.code.findbugs + jsr305 + + @@ -200,11 +202,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - org.junit.jupiter - junit-jupiter-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-hdds/common/src/main/java/com/google/protobuf/Proto2Utils.java b/hadoop-hdds/common/src/main/java/com/google/protobuf/Proto2Utils.java new file mode 100644 index 
000000000000..73cbd3e66f15 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/com/google/protobuf/Proto2Utils.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.protobuf; + +/** Utilities for protobuf v2. */ +public final class Proto2Utils { + /** + * Similar to {@link ByteString#copyFrom(byte[])} except that this method does not copy. + * This method is safe only if the content of the array remains unchanged. + * Otherwise, it violates the immutability of {@link ByteString}. + */ + public static ByteString unsafeByteString(byte[] array) { + return array != null && array.length > 0 ? new LiteralByteString(array) : ByteString.EMPTY; + } + + private Proto2Utils() { } +} diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp b/hadoop-hdds/common/src/main/java/com/google/protobuf/package-info.java similarity index 70% rename from hadoop-hdds/rocks-native/src/main/native/Pipe.cpp rename to hadoop-hdds/common/src/main/java/com/google/protobuf/package-info.java index f1dd54438700..0cabebdb6965 100644 --- a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp +++ b/hadoop-hdds/common/src/main/java/com/google/protobuf/package-info.java @@ -16,22 +16,7 @@ * limitations under the License. */ -#include "Pipe.h" -#include - -const int Pipe::READ_FILE_DESCRIPTOR_IDX = 0; -const int Pipe::WRITE_FILE_DESCRIPTOR_IDX = 1; - -Pipe::Pipe() { - pipe(p); - open = true; -} - -Pipe::~Pipe() { - ::close(p[Pipe::READ_FILE_DESCRIPTOR_IDX]); - ::close(p[Pipe::WRITE_FILE_DESCRIPTOR_IDX]); -} - -void Pipe::close() { - open = false; -} +/** + * Classes using the protobuf internal APIs. 
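Aside: a minimal usage sketch (not part of the patch itself) of the no-copy contract documented on Proto2Utils.unsafeByteString above. The returned ByteString aliases the caller's array, so mutating the array afterwards is visible through the supposedly immutable view; only the method added above and standard protobuf ByteString calls are used, the demo class name is made up.

import com.google.protobuf.ByteString;
import com.google.protobuf.Proto2Utils;
import java.nio.charset.StandardCharsets;

public final class UnsafeByteStringDemo {
  public static void main(String[] args) {
    byte[] data = "chunk-data".getBytes(StandardCharsets.UTF_8);
    ByteString wrapped = Proto2Utils.unsafeByteString(data); // aliases data, no copy
    ByteString copied = ByteString.copyFrom(data);           // defensive copy

    data[0] = 'X'; // mutating the array after handing it over...
    System.out.println(wrapped.byteAt(0)); // 88 ('X'): the mutation leaks through the wrapper
    System.out.println(copied.byteAt(0));  // 99 ('c'): the copy is unaffected
  }
}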
+ */ +package com.google.protobuf; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DatanodeVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DatanodeVersion.java index 30f9df597b51..e35d20d53e15 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DatanodeVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DatanodeVersion.java @@ -31,6 +31,8 @@ public enum DatanodeVersion implements ComponentVersion { DEFAULT_VERSION(0, "Initial version"), SEPARATE_RATIS_PORTS_AVAILABLE(1, "Version with separated Ratis port."), + COMBINED_PUTBLOCK_WRITECHUNK_RPC(2, "WriteChunk can optionally support " + + "a PutBlock request"), FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 787f023df2ea..609baeeaf7fa 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -152,6 +152,11 @@ public final class HddsConfigKeys { + ".name"; public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem"; + public static final String HDDS_HTTP_SERVER_KEYSTORE_TYPE = "ssl.server.keystore.type"; + public static final String HDDS_HTTP_SERVER_KEYSTORE_TYPE_DEFAULT = "jks"; + public static final String HDDS_HTTP_SERVER_TRUSTSTORE_TYPE = "ssl.server.truststore.type"; + public static final String HDDS_HTTP_SERVER_TRUSTSTORE_TYPE_DEFAULT = "jks"; + public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME = "hdds.block.token.expiry.time"; public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT = "1d"; @@ -338,6 +343,9 @@ private HddsConfigKeys() { HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL = "hdds.security.client.scm.secretkey.datanode.protocol.acl"; + public static final String OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL = + "ozone.security.reconfigure.protocol.acl"; + // Determines if the Container Chunk Manager will write user data to disk // Set to false only for specific performance tests public static final String HDDS_CONTAINER_PERSISTDATA = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index ee1c9669a1b1..794b972f1509 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -438,6 +438,9 @@ public static boolean isReadOnly( case StreamInit: case StreamWrite: case FinalizeBlock: + return false; + case Echo: + return proto.getEcho().hasReadOnly() && proto.getEcho().getReadOnly(); default: return false; } @@ -807,7 +810,7 @@ public static Map processForLogging(OzoneConfiguration conf) { } @Nonnull - public static String threadNamePrefix(@Nullable String id) { + public static String threadNamePrefix(@Nullable Object id) { return id != null && !"".equals(id) ? 
id + "-" : ""; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java new file mode 100644 index 000000000000..7d5b610b0875 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.client; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import jakarta.annotation.Nullable; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Decommission specific stateless utility functions. + */ +@InterfaceAudience.Private +@InterfaceStability.Stable +public final class DecommissionUtils { + + + private static final Logger LOG = LoggerFactory.getLogger(DecommissionUtils.class); + + private DecommissionUtils() { + } + + /** + * Returns the list of uuid or ipAddress matching decommissioning status nodes. + * + * @param allNodes All datanodes which are in decommissioning status. + * @param uuid node uuid. + * @param ipAddress node ipAddress + * @return the list of uuid or ipAddress matching decommissioning status nodes. + */ + public static List getDecommissioningNodesList(Stream allNodes, + String uuid, + String ipAddress) { + List decommissioningNodes; + if (!Strings.isNullOrEmpty(uuid)) { + decommissioningNodes = allNodes.filter(p -> p.getNodeID().getUuid() + .equals(uuid)).collect(Collectors.toList()); + } else if (!Strings.isNullOrEmpty(ipAddress)) { + decommissioningNodes = allNodes.filter(p -> p.getNodeID().getIpAddress() + .compareToIgnoreCase(ipAddress) == 0).collect(Collectors.toList()); + } else { + decommissioningNodes = allNodes.collect(Collectors.toList()); + } + return decommissioningNodes; + } + + /** + * Returns Json node of datanode metrics. + * + * @param metricsJson + * @return Json node of datanode metrics + * @throws IOException + */ + public static JsonNode getBeansJsonNode(String metricsJson) throws IOException { + JsonNode jsonNode; + ObjectMapper objectMapper = new ObjectMapper(); + JsonFactory factory = objectMapper.getFactory(); + JsonParser parser = factory.createParser(metricsJson); + jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0); + return jsonNode; + } + + /** + * Returns the number of decommissioning nodes. + * + * @param jsonNode + * @return + */ + public static int getNumDecomNodes(JsonNode jsonNode) { + int numDecomNodes; + JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal"); + numDecomNodes = (totalDecom == null ? -1 : Integer.parseInt(totalDecom.toString())); + return numDecomNodes; + } + + /** + * Returns the counts of following info attributes. 
+ * - decommissionStartTime + * - numOfUnclosedPipelines + * - numOfUnderReplicatedContainers + * - numOfUnclosedContainers + * + * @param datanode + * @param counts + * @param numDecomNodes + * @param countsMap + * @param errMsg + * @return + * @throws IOException + */ + @Nullable + public static Map getCountsMap(DatanodeDetails datanode, JsonNode counts, int numDecomNodes, + Map countsMap, String errMsg) + throws IOException { + for (int i = 1; i <= numDecomNodes; i++) { + if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) { + JsonNode pipelinesDN = counts.get("PipelinesWaitingToCloseDN." + i); + JsonNode underReplicatedDN = counts.get("UnderReplicatedDN." + i); + JsonNode unclosedDN = counts.get("UnclosedContainersDN." + i); + JsonNode startTimeDN = counts.get("StartTimeDN." + i); + if (pipelinesDN == null || underReplicatedDN == null || unclosedDN == null || startTimeDN == null) { + throw new IOException(errMsg); + } + + int pipelines = Integer.parseInt(pipelinesDN.toString()); + double underReplicated = Double.parseDouble(underReplicatedDN.toString()); + double unclosed = Double.parseDouble(unclosedDN.toString()); + long startTime = Long.parseLong(startTimeDN.toString()); + Date date = new Date(startTime); + DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); + countsMap.put("decommissionStartTime", formatter.format(date)); + countsMap.put("numOfUnclosedPipelines", pipelines); + countsMap.put("numOfUnderReplicatedContainers", underReplicated); + countsMap.put("numOfUnclosedContainers", unclosed); + return countsMap; + } + } + return null; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DefaultReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DefaultReplicationConfig.java index 3e3990b8b4ba..18a931546d1b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DefaultReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DefaultReplicationConfig.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.client; +import net.jcip.annotations.Immutable; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.util.Objects; @@ -24,6 +25,7 @@ /** * Replication configuration for EC replication. */ +@Immutable public class DefaultReplicationConfig { private final ECReplicationConfig ecReplicationConfig; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java index 25ea315af284..a6dbd933ff1d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.client; import com.fasterxml.jackson.annotation.JsonIgnore; +import net.jcip.annotations.Immutable; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.util.EnumSet; @@ -30,6 +31,7 @@ /** * Replication configuration for EC replication. 
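Aside: a minimal sketch (not part of the patch itself) of how the DecommissionUtils helpers above are meant to be fed. They expect the datanode admin-monitor JMX payload under a top-level "beans" array; the metric names below are taken from the code above, while the JSON values and the demo class name are invented.

import com.fasterxml.jackson.databind.JsonNode;
import java.io.IOException;
import org.apache.hadoop.hdds.client.DecommissionUtils;

public final class DecommissionMetricsDemo {
  public static void main(String[] args) throws IOException {
    String metricsJson = "{\"beans\":[{"
        + "\"DecommissioningMaintenanceNodesTotal\":1,"
        + "\"tag.datanode.1\":\"dn1.example.com\","
        + "\"PipelinesWaitingToCloseDN.1\":2,"
        + "\"UnderReplicatedDN.1\":10.0,"
        + "\"UnclosedContainersDN.1\":3.0,"
        + "\"StartTimeDN.1\":1700000000000"
        + "}]}";
    JsonNode bean = DecommissionUtils.getBeansJsonNode(metricsJson); // first element of "beans"
    int numDecomNodes = DecommissionUtils.getNumDecomNodes(bean);    // reads DecommissioningMaintenanceNodesTotal -> 1
    System.out.println(numDecomNodes);
  }
}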
*/ +@Immutable public class ECReplicationConfig implements ReplicationConfig { public static final String EC_REPLICATION_PARAMS_DELIMITER = "-"; @@ -60,17 +62,16 @@ public static String allValuesAsString() { private static final Pattern STRING_FORMAT = Pattern.compile("([a-zA-Z]+)-(\\d+)-(\\d+)-(\\d+)([kK])?"); - private int data; + private final int data; - private int parity; + private final int parity; - private int ecChunkSize = 1024 * 1024; + private final int ecChunkSize; - private EcCodec codec = EcCodec.RS; + private final EcCodec codec; public ECReplicationConfig(int data, int parity) { - this.data = data; - this.parity = parity; + this(data, parity, EcCodec.RS, 1024 * 1024); } public ECReplicationConfig(int data, int parity, EcCodec codec, @@ -121,7 +122,7 @@ public ECReplicationConfig(String string) { ") be greater than zero"); } if (matcher.group(5) != null) { - // The "k" modifier is present, so multiple by 1024 + // The "k" modifier is present, so multiply by 1024 chunkSize = chunkSize * 1024; } ecChunkSize = chunkSize; @@ -154,6 +155,14 @@ public String getReplication() { + chunkKB(); } + /** Similar to {@link #getReplication()}, but applies to proto structure, without any validation. */ + public static String toString(HddsProtos.ECReplicationConfig proto) { + return proto.getCodec() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getData() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getParity() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getEcChunkSize(); + } + public HddsProtos.ECReplicationConfig toProto() { return HddsProtos.ECReplicationConfig.newBuilder() .setData(data) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java index b3a762e2eda1..c8cf4fdd42ba 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,12 +19,13 @@ package org.apache.hadoop.hdds.client; import com.google.common.base.Strings; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.ratis.util.Preconditions; -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.apache.hadoop.ozone.OzoneConsts.KB; -import static org.apache.hadoop.ozone.OzoneConsts.MB; -import static org.apache.hadoop.ozone.OzoneConsts.TB; - +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; /** * represents an OzoneQuota Object that can be applied to @@ -32,39 +33,71 @@ */ public final class OzoneQuota { - public static final String OZONE_QUOTA_B = "B"; - public static final String OZONE_QUOTA_KB = "KB"; - public static final String OZONE_QUOTA_MB = "MB"; - public static final String OZONE_QUOTA_GB = "GB"; - public static final String OZONE_QUOTA_TB = "TB"; - /** Quota Units.*/ - public enum Units { B, KB, MB, GB, TB } + public enum Units { + // the names and the ordering are important + B(1), + KB(OzoneConsts.KB), + MB(OzoneConsts.MB), + GB(OzoneConsts.GB), + TB(OzoneConsts.TB), + PB(OzoneConsts.PB), + EB(OzoneConsts.EB); + + private final long size; + private final List cache; + + Units(long size) { + this.size = size; + this.cache = createCache(this); + } - // Quota to decide how many buckets can be created. - private long quotaInNamespace; - // Quota to decide how many storage space will be used in bytes. - private long quotaInBytes; - private RawQuotaInBytes rawQuotaInBytes; - // Data class of Quota. - private static QuotaList quotaList; + private static List createCache(Units unit) { + final List quotas = new ArrayList<>(1024); + for (int i = 0; i < 1024; i++) { + quotas.add(new RawQuotaInBytes(unit, i)); + } + return Collections.unmodifiableList(quotas); + } + + public long getSize() { + return size; + } - /** Setting QuotaList parameters from large to small. */ + RawQuotaInBytes getRawQuotaInBytes(long b) { + return b < cache.size() ? cache.get(Math.toIntExact(b)) + : new RawQuotaInBytes(this, b); + } + } + + private static final List PARSE_ORDER; static { - quotaList = new QuotaList(); - quotaList.addQuotaList(OZONE_QUOTA_TB, Units.TB, TB); - quotaList.addQuotaList(OZONE_QUOTA_GB, Units.GB, GB); - quotaList.addQuotaList(OZONE_QUOTA_MB, Units.MB, MB); - quotaList.addQuotaList(OZONE_QUOTA_KB, Units.KB, KB); - quotaList.addQuotaList(OZONE_QUOTA_B, Units.B, 1L); + List reversed = new ArrayList<>(Arrays.asList(Units.values())); + Collections.reverse(reversed); + PARSE_ORDER = Collections.unmodifiableList(reversed); } + // Quota to decide how many buckets can be created. + private long quotaInNamespace; + // Quota to decide how many storage space will be used in bytes. + private final long quotaInBytes; + private final RawQuotaInBytes rawQuotaInBytes; + /** * Used to convert user input values into bytes such as: 1MB-> 1048576. 
*/ private static class RawQuotaInBytes { - private Units unit; - private long size; + static RawQuotaInBytes valueOf(long quotaInBytes) { + Preconditions.assertTrue(quotaInBytes >= 0, () -> "quotaInBytes = " + quotaInBytes + " must be >= 0"); + final int i = Long.numberOfTrailingZeros(quotaInBytes) / 10; + final Units unit = Units.values()[i]; + final RawQuotaInBytes b = unit.getRawQuotaInBytes(quotaInBytes >> (i * 10)); + Preconditions.assertSame(quotaInBytes, b.sizeInBytes(), "sizeInBytes"); + return b; + } + + private final Units unit; + private final long size; RawQuotaInBytes(Units unit, long size) { this.unit = unit; @@ -83,14 +116,7 @@ public long getSize() { * Returns size in Bytes or negative num if there is no Quota. */ public long sizeInBytes() { - long sQuota = -1L; - for (Units quota : quotaList.getUnitQuotaArray()) { - if (quota == this.unit) { - sQuota = quotaList.getQuotaSize(quota); - break; - } - } - return this.getSize() * sQuota; + return this.getSize() * getUnit().getSize(); } @Override @@ -158,20 +184,21 @@ public static OzoneQuota parseSpaceQuota(String quotaInBytes) { String uppercase = quotaInBytes.toUpperCase() .replaceAll("\\s+", ""); String size = ""; - long nSize = 0; + final long nSize; Units currUnit = Units.B; try { - for (String quota : quotaList.getOzoneQuotaArray()) { + for (Units unit : PARSE_ORDER) { + final String quota = unit.name(); if (uppercase.endsWith((quota))) { size = uppercase .substring(0, uppercase.length() - quota.length()); - currUnit = quotaList.getUnits(quota); + currUnit = unit; break; } } // there might be no unit specified. - if (size.equals("")) { + if (size.isEmpty()) { size = uppercase; } nSize = Long.parseLong(size); @@ -240,15 +267,7 @@ public static OzoneQuota parseQuota(String quotaInBytes, */ public static OzoneQuota getOzoneQuota(long quotaInBytes, long quotaInNamespace) { - long size = 1L; - Units unit = Units.B; - for (Long quota : quotaList.getSizeQuotaArray()) { - if (quotaInBytes % quota == 0) { - size = quotaInBytes / quota; - unit = quotaList.getQuotaUnit(quota); - } - } - return new OzoneQuota(quotaInNamespace, new RawQuotaInBytes(unit, size)); + return new OzoneQuota(quotaInNamespace, RawQuotaInBytes.valueOf(quotaInBytes)); } public long getQuotaInNamespace() { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java deleted file mode 100644 index 230b825f4d45..000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
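Aside: a simplified standalone illustration (not part of the patch itself) of the unit-derivation trick used by the reworked RawQuotaInBytes.valueOf above. Each 1024-based unit is 2^(10*i) bytes, so Long.numberOfTrailingZeros(bytes) / 10 picks the largest unit that divides the byte count exactly; the class and method names here are hypothetical.

public final class QuotaUnitDemo {
  enum Unit { B, KB, MB, GB, TB, PB, EB }

  // Mirrors RawQuotaInBytes.valueOf: trailing zero bits / 10 = index of the
  // largest exact 1024-based unit; shifting right by 10*i yields the size.
  static String humanReadable(long bytes) {
    int i = bytes == 0 ? 0 : Long.numberOfTrailingZeros(bytes) / 10;
    return (bytes >> (i * 10)) + " " + Unit.values()[i];
  }

  public static void main(String[] args) {
    System.out.println(humanReadable(1L << 20));  // "1 MB"
    System.out.println(humanReadable(5L << 30));  // "5 GB"
    System.out.println(humanReadable(1048577L));  // "1048577 B" (odd byte count, no larger exact unit)
  }
}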
- */ - -package org.apache.hadoop.hdds.client; - -import java.util.ArrayList; -import java.util.List; - -/** - *This class contains arraylist for storage constant used in OzoneQuota. - */ -public class QuotaList { - private final ArrayList ozoneQuota; - private final ArrayList unitQuota; - private final ArrayList sizeQuota; - - public QuotaList() { - ozoneQuota = new ArrayList<>(); - unitQuota = new ArrayList<>(); - sizeQuota = new ArrayList<>(); - } - - public void addQuotaList( - String oQuota, OzoneQuota.Units uQuota, Long sQuota) { - ozoneQuota.add(oQuota); - unitQuota.add(uQuota); - sizeQuota.add(sQuota); - } - - public List getOzoneQuotaArray() { - return this.ozoneQuota; - } - - public List getSizeQuotaArray() { - return this.sizeQuota; - } - - public List getUnitQuotaArray() { - return this.unitQuota; - } - - public OzoneQuota.Units getUnits(String oQuota) { - return unitQuota.get(ozoneQuota.indexOf(oQuota)); - } - - public Long getQuotaSize(OzoneQuota.Units uQuota) { - return sizeQuota.get(unitQuota.indexOf(uQuota)); - } - - public OzoneQuota.Units getQuotaUnit(Long sQuota) { - return unitQuota.get(sizeQuota.indexOf(sQuota)); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java index 377f8cd998a7..36d4d90e1afb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; +import net.jcip.annotations.Immutable; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; @@ -31,6 +32,7 @@ /** * Replication configuration for Ratis replication. */ +@Immutable public final class RatisReplicationConfig implements ReplicatedReplicationConfig { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java index e9d77e9bebaf..9ca2dfb538a9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; +import net.jcip.annotations.Immutable; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; @@ -31,6 +32,7 @@ /** * Replication configuration for STANDALONE replication. 
*/ +@Immutable public final class StandaloneReplicationConfig implements ReplicatedReplicationConfig { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index 69cce8db6d6b..b8742c6ba929 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -30,6 +30,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.List; @@ -47,6 +48,7 @@ import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.ratis.server.RaftServerConfigKeys; import static java.util.Collections.unmodifiableSortedSet; @@ -162,15 +164,11 @@ public XMLConfiguration() { } public XMLConfiguration(List properties) { - this.properties = properties; + this.properties = new ArrayList<>(properties); } public List getProperties() { - return properties; - } - - public void setProperties(List properties) { - this.properties = properties; + return Collections.unmodifiableList(properties); } } @@ -323,7 +321,67 @@ private static void addDeprecatedKeys() { new DeprecationDelta("ozone.scm.chunk.layout", ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY), new DeprecationDelta("hdds.datanode.replication.work.dir", - OZONE_CONTAINER_COPY_WORKDIR) + OZONE_CONTAINER_COPY_WORKDIR), + new DeprecationDelta("dfs.container.chunk.write.sync", + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY), + new DeprecationDelta("dfs.container.ipc", + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT), + new DeprecationDelta("dfs.container.ipc.random.port", + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.admin.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT), + new DeprecationDelta("dfs.container.ratis.datanode.storage.dir", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR), + new DeprecationDelta("dfs.container.ratis.datastream.enabled", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED), + new DeprecationDelta("dfs.container.ratis.datastream.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT), + new DeprecationDelta("dfs.container.ratis.datastream.random.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.enabled", + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY), + new DeprecationDelta("dfs.container.ratis.ipc", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT), + new DeprecationDelta("dfs.container.ratis.ipc.random.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS), + new DeprecationDelta("dfs.container.ratis.log.purge.gap", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP), + new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit", + 
ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.queue.num-elements", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS), + new DeprecationDelta("dfs.container.ratis.num.container.op.executors", + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY), + new DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume", + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME), + new DeprecationDelta("dfs.container.ratis.replication.level", + ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY), + new DeprecationDelta("dfs.container.ratis.rpc.type", + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY), + new DeprecationDelta("dfs.container.ratis.segment.preallocated.size", + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY), + new DeprecationDelta("dfs.container.ratis.segment.size", + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY), + new DeprecationDelta("dfs.container.ratis.server.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT), + new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES), + new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT), + new DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS), + new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration", + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY), + new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration", + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY), + new DeprecationDelta("dfs.ratis.snapshot.threshold", + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY) }); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java index 2d29dc8565c8..ba203f9c8e2d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.freon; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Random; import java.util.UUID; @@ -36,40 +37,49 @@ * Class to store pre-generated topology information for load-tests. 
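As a usage sketch for this class: callers only go through the shared INSTANCE, and after the refactor in the hunk that follows the collections it hands out are read-only. The method names follow the API visible in this diff (getAllDatanodes, getRandomPipeline); the proto getters printed below are assumptions of the sketch.

import org.apache.hadoop.hdds.freon.FakeClusterTopology;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Pipeline;

public class TopologyDump {
  public static void main(String[] args) {
    // Iterate the pre-generated datanodes; the Iterable is backed by an
    // unmodifiable list, so it cannot be used to mutate the topology.
    for (DatanodeDetailsProto dn : FakeClusterTopology.INSTANCE.getAllDatanodes()) {
      System.out.println(dn.getUuid() + " on " + dn.getHostName());
    }
    // Pick one of the pre-built three-node RATIS pipelines at random.
    Pipeline pipeline = FakeClusterTopology.INSTANCE.getRandomPipeline();
    System.out.println("random pipeline: " + pipeline.getId());
  }
}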
*/ @SuppressWarnings("java:S2245") // no need for secure random -public class FakeClusterTopology { +public final class FakeClusterTopology { private static final Logger LOGGER = LoggerFactory.getLogger(FakeClusterTopology.class); - public static final FakeClusterTopology INSTANCE = new FakeClusterTopology(); + public static final FakeClusterTopology INSTANCE = newFakeClusterTopology(); - private List datanodes = new ArrayList<>(); + private final List datanodes; - private List pipelines = new ArrayList<>(); + private final List pipelines; - private Random random = new Random(); + private final Random random = new Random(); - public FakeClusterTopology() { + private static FakeClusterTopology newFakeClusterTopology() { + final int nodeCount = 9; + final List datanodes = new ArrayList<>(nodeCount); + final List pipelines = new ArrayList<>(nodeCount / 3); try { - for (int i = 0; i < 9; i++) { + for (int i = 0; i < nodeCount; i++) { datanodes.add(createDatanode()); if ((i + 1) % 3 == 0) { pipelines.add(Pipeline.newBuilder() .setId(PipelineID.randomId().getProtobuf()) .setFactor(ReplicationFactor.THREE) .setType(ReplicationType.RATIS) - .addMembers(getDatanode(i - 2)) - .addMembers(getDatanode(i - 1)) - .addMembers(getDatanode(i)) + .addMembers(datanodes.get(i - 2)) + .addMembers(datanodes.get(i - 1)) + .addMembers(datanodes.get(i)) .build()); } } } catch (Exception ex) { LOGGER.error("Can't initialize FakeClusterTopology", ex); } + return new FakeClusterTopology(datanodes, pipelines); } - private DatanodeDetailsProto createDatanode() { + private FakeClusterTopology(List datanodes, List pipelines) { + this.datanodes = Collections.unmodifiableList(datanodes); + this.pipelines = Collections.unmodifiableList(pipelines); + } + + private static DatanodeDetailsProto createDatanode() { return DatanodeDetailsProto.newBuilder() .setUuid(UUID.randomUUID().toString()) .setHostName("localhost") @@ -79,15 +89,11 @@ private DatanodeDetailsProto createDatanode() { .build(); } - public DatanodeDetailsProto getDatanode(int i) { - return datanodes.get(i); - } - public Pipeline getRandomPipeline() { return pipelines.get(random.nextInt(pipelines.size())); } - public List getAllDatanodes() { + public Iterable getAllDatanodes() { return datanodes; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java index 6f776072d9c3..b9a2f87a03da 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java @@ -94,7 +94,19 @@ public void incrementUsedSpace(long usedSpace) { } public void decrementUsedSpace(long reclaimedSpace) { - cachedValue.addAndGet(-1 * reclaimedSpace); + cachedValue.updateAndGet(current -> { + long newValue = current - reclaimedSpace; + if (newValue < 0) { + if (current > 0) { + LOG.warn("Attempted to decrement used space to a negative value. 
" + + "Current: {}, Decrement: {}, Source: {}", + current, reclaimedSpace, source); + } + return 0; + } else { + return newValue; + } + }); } public void start() { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java index c25c0a40c53e..a367cfbdc061 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java @@ -57,9 +57,9 @@ final class Fixed implements SpaceUsageSource { private final long available; private final long used; - Fixed(long capacity, long available, long used) { + public Fixed(long capacity, long available, long used) { this.capacity = capacity; - this.available = available; + this.available = Math.max(Math.min(available, capacity - used), 0); this.used = used; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 5b6fb6fe9b81..68a640e6e9fc 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -26,8 +26,11 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.google.common.collect.ImmutableSet; +import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.scm.net.NetUtils; +import org.apache.hadoop.util.StringWithByteString; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; @@ -81,75 +84,58 @@ public static Codec getCodec() { * DataNode's unique identifier in the cluster. */ private final UUID uuid; - private final String uuidString; + private final StringWithByteString uuidString; private final String threadNamePrefix; - - private String ipAddress; - private String hostName; - private List ports; + private StringWithByteString ipAddress; + private StringWithByteString hostName; + private final List ports; private String certSerialId; private String version; private long setupTime; private String revision; private String buildDate; private volatile HddsProtos.NodeOperationalState persistedOpState; - private volatile long persistedOpStateExpiryEpochSec = 0; + private volatile long persistedOpStateExpiryEpochSec; private int initialVersion; private int currentVersion; - /** - * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used - * for instantiating DatanodeDetails. - * @param uuid DataNode's UUID - * @param ipAddress IP Address of this DataNode - * @param hostName DataNode's hostname - * @param networkLocation DataNode's network location path - * @param ports Ports used by the DataNode - * @param certSerialId serial id from SCM issued certificate. - * @param version DataNode's version - * @param setupTime the setup time of DataNode - * @param revision DataNodes's revision - * @param buildDate DataNodes's build timestamp - * @param persistedOpState Operational State stored on DN. - * @param persistedOpStateExpiryEpochSec Seconds after the epoch the stored - * state should expire. 
- */ - @SuppressWarnings("parameternumber") - private DatanodeDetails(UUID uuid, String ipAddress, String hostName, - String networkLocation, List ports, String certSerialId, - String version, long setupTime, String revision, String buildDate, - HddsProtos.NodeOperationalState persistedOpState, - long persistedOpStateExpiryEpochSec, - int initialVersion, int currentVersion) { - super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); - this.uuid = uuid; - this.uuidString = uuid.toString(); + private DatanodeDetails(Builder b) { + super(b.hostName, b.networkLocation, NetConstants.NODE_COST_DEFAULT); + uuid = b.id; + uuidString = StringWithByteString.valueOf(uuid.toString()); threadNamePrefix = HddsUtils.threadNamePrefix(uuidString); - this.ipAddress = ipAddress; - this.hostName = hostName; - this.ports = ports; - this.certSerialId = certSerialId; - this.version = version; - this.setupTime = setupTime; - this.revision = revision; - this.buildDate = buildDate; - this.persistedOpState = persistedOpState; - this.persistedOpStateExpiryEpochSec = persistedOpStateExpiryEpochSec; - this.initialVersion = initialVersion; - this.currentVersion = currentVersion; + ipAddress = b.ipAddress; + hostName = b.hostName; + ports = b.ports; + certSerialId = b.certSerialId; + version = b.version; + setupTime = b.setupTime; + revision = b.revision; + buildDate = b.buildDate; + persistedOpState = b.persistedOpState; + persistedOpStateExpiryEpochSec = b.persistedOpStateExpiryEpochSec; + initialVersion = b.initialVersion; + currentVersion = b.currentVersion; + if (b.networkName != null) { + setNetworkName(b.networkName); + } + if (b.level > 0) { + setLevel(b.level); + } } public DatanodeDetails(DatanodeDetails datanodeDetails) { - super(datanodeDetails.getHostName(), datanodeDetails.getNetworkLocation(), + super(datanodeDetails.getHostNameAsByteString(), datanodeDetails.getNetworkLocationAsByteString(), datanodeDetails.getParent(), datanodeDetails.getLevel(), datanodeDetails.getCost()); this.uuid = datanodeDetails.uuid; - this.uuidString = uuid.toString(); + this.uuidString = datanodeDetails.uuidString; threadNamePrefix = HddsUtils.threadNamePrefix(uuidString); this.ipAddress = datanodeDetails.ipAddress; this.hostName = datanodeDetails.hostName; this.ports = datanodeDetails.ports; - this.setNetworkName(datanodeDetails.getNetworkName()); + this.certSerialId = datanodeDetails.certSerialId; + this.setNetworkName(datanodeDetails.getNetworkNameAsByteString()); this.setParent(datanodeDetails.getParent()); this.version = datanodeDetails.version; this.setupTime = datanodeDetails.setupTime; @@ -177,7 +163,7 @@ public UUID getUuid() { * @return UUID of DataNode */ public String getUuidString() { - return uuidString; + return uuidString.getString(); } /** @@ -186,7 +172,7 @@ public String getUuidString() { * @param ip IP Address */ public void setIpAddress(String ip) { - this.ipAddress = ip; + this.ipAddress = StringWithByteString.valueOf(ip); } /** @@ -195,6 +181,15 @@ public void setIpAddress(String ip) { * @return IP address */ public String getIpAddress() { + return ipAddress.getString(); + } + + /** + * Returns IP address of DataNode as a StringWithByteString object. 
+ * + * @return IP address as ByteString + */ + public StringWithByteString getIpAddressAsByteString() { return ipAddress; } @@ -204,7 +199,7 @@ public String getIpAddress() { * @param host hostname */ public void setHostName(String host) { - this.hostName = host; + this.hostName = StringWithByteString.valueOf(host); } /** @@ -213,6 +208,15 @@ public void setHostName(String host) { * @return Hostname */ public String getHostName() { + return hostName.getString(); + } + + /** + * Returns IP address of DataNode as a StringWithByteString object. + * + * @return Hostname + */ + public StringWithByteString getHostNameAsByteString() { return hostName; } @@ -238,7 +242,16 @@ public synchronized void setPort(Name name, int port) { * @return DataNode Ports */ public synchronized List getPorts() { - return ports; + return new ArrayList<>(ports); + } + + public synchronized boolean hasPort(int port) { + for (Port p : ports) { + if (p.getValue() == port) { + return true; + } + } + return false; } /** @@ -343,10 +356,10 @@ public static DatanodeDetails.Builder newBuilder( } if (datanodeDetailsProto.hasIpAddress()) { - builder.setIpAddress(datanodeDetailsProto.getIpAddress()); + builder.setIpAddress(datanodeDetailsProto.getIpAddress(), datanodeDetailsProto.getIpAddressBytes()); } if (datanodeDetailsProto.hasHostName()) { - builder.setHostName(datanodeDetailsProto.getHostName()); + builder.setHostName(datanodeDetailsProto.getHostName(), datanodeDetailsProto.getHostNameBytes()); } if (datanodeDetailsProto.hasCertSerialId()) { builder.setCertSerialId(datanodeDetailsProto.getCertSerialId()); @@ -359,10 +372,15 @@ public static DatanodeDetails.Builder newBuilder( } } if (datanodeDetailsProto.hasNetworkName()) { - builder.setNetworkName(datanodeDetailsProto.getNetworkName()); + builder.setNetworkName( + datanodeDetailsProto.getNetworkName(), datanodeDetailsProto.getNetworkNameBytes()); } if (datanodeDetailsProto.hasNetworkLocation()) { - builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation()); + builder.setNetworkLocation( + datanodeDetailsProto.getNetworkLocation(), datanodeDetailsProto.getNetworkLocationBytes()); + } + if (datanodeDetailsProto.hasLevel()) { + builder.setLevel(datanodeDetailsProto.getLevel()); } if (datanodeDetailsProto.hasPersistedOpState()) { builder.setPersistedOpState(datanodeDetailsProto.getPersistedOpState()); @@ -371,6 +389,9 @@ public static DatanodeDetails.Builder newBuilder( builder.setPersistedOpStateExpiry( datanodeDetailsProto.getPersistedOpStateExpiry()); } + if (datanodeDetailsProto.hasCurrentVersion()) { + builder.setCurrentVersion(datanodeDetailsProto.getCurrentVersion()); + } return builder; } @@ -439,22 +460,25 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( HddsProtos.DatanodeDetailsProto.newBuilder() .setUuid128(uuid128); - builder.setUuid(getUuidString()); + builder.setUuidBytes(uuidString.getBytes()); if (ipAddress != null) { - builder.setIpAddress(ipAddress); + builder.setIpAddressBytes(ipAddress.getBytes()); } if (hostName != null) { - builder.setHostName(hostName); + builder.setHostNameBytes(hostName.getBytes()); } if (certSerialId != null) { builder.setCertSerialId(certSerialId); } if (!Strings.isNullOrEmpty(getNetworkName())) { - builder.setNetworkName(getNetworkName()); + builder.setNetworkNameBytes(getNetworkNameAsByteString().getBytes()); } if (!Strings.isNullOrEmpty(getNetworkLocation())) { - builder.setNetworkLocation(getNetworkLocation()); + builder.setNetworkLocationBytes(getNetworkLocationAsByteString().getBytes()); + } + if 
(getLevel() > 0) { + builder.setLevel(getLevel()); } if (persistedOpState != null) { builder.setPersistedOpState(persistedOpState); @@ -475,6 +499,8 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( } } + builder.setCurrentVersion(currentVersion); + return builder; } @@ -505,6 +531,7 @@ public ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() { } /** + * Note: Datanode initial version is not passed to the client due to no use case. See HDDS-9884 * @return the version this datanode was initially created with */ public int getInitialVersion() { @@ -581,10 +608,11 @@ public String threadNamePrefix() { */ public static final class Builder { private UUID id; - private String ipAddress; - private String hostName; - private String networkName; - private String networkLocation; + private StringWithByteString ipAddress; + private StringWithByteString hostName; + private StringWithByteString networkName; + private StringWithByteString networkLocation; + private int level; private List ports; private String certSerialId; private String version; @@ -612,10 +640,11 @@ private Builder() { */ public Builder setDatanodeDetails(DatanodeDetails details) { this.id = details.getUuid(); - this.ipAddress = details.getIpAddress(); - this.hostName = details.getHostName(); - this.networkName = details.getNetworkName(); - this.networkLocation = details.getNetworkLocation(); + this.ipAddress = details.getIpAddressAsByteString(); + this.hostName = details.getHostNameAsByteString(); + this.networkName = details.getHostNameAsByteString(); + this.networkLocation = details.getNetworkLocationAsByteString(); + this.level = details.getLevel(); this.ports = details.getPorts(); this.certSerialId = details.getCertSerialId(); this.version = details.getVersion(); @@ -646,7 +675,19 @@ public Builder setUuid(UUID uuid) { * @return DatanodeDetails.Builder */ public Builder setIpAddress(String ip) { - this.ipAddress = ip; + this.ipAddress = StringWithByteString.valueOf(ip); + return this; + } + + /** + * Sets the IP address of DataNode. + * + * @param ip address + * @param ipBytes address in Bytes + * @return DatanodeDetails.Builder + */ + public Builder setIpAddress(String ip, ByteString ipBytes) { + this.ipAddress = new StringWithByteString(ip, ipBytes); return this; } @@ -657,7 +698,19 @@ public Builder setIpAddress(String ip) { * @return DatanodeDetails.Builder */ public Builder setHostName(String host) { - this.hostName = host; + this.hostName = StringWithByteString.valueOf(host); + return this; + } + + /** + * Sets the hostname of DataNode. + * + * @param host hostname + * @param hostBytes hostname + * @return DatanodeDetails.Builder + */ + public Builder setHostName(String host, ByteString hostBytes) { + this.hostName = new StringWithByteString(host, hostBytes); return this; } @@ -665,10 +718,11 @@ public Builder setHostName(String host) { * Sets the network name of DataNode. 
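For callers that build a DatanodeDetails by hand, the single-argument setters still apply; the new (String, ByteString) overloads are meant for the protobuf decode path, where the ByteString already on the wire can be reused instead of re-encoding the String. A minimal sketch, assuming the existing no-argument DatanodeDetails.newBuilder() factory:

import java.util.UUID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;

public final class BuildDatanode {
  private BuildDatanode() {
  }

  public static DatanodeDetails example() {
    return DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID())
        .setIpAddress("10.0.0.1")          // stored as StringWithByteString internally
        .setHostName("dn-1.example.com")   // likewise
        .setNetworkLocation("/rack-1")     // normalized via NetUtils.normalize
        .build();
  }
}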
* * @param name network name + * @param nameBytes network name * @return DatanodeDetails.Builder */ - public Builder setNetworkName(String name) { - this.networkName = name; + public Builder setNetworkName(String name, ByteString nameBytes) { + this.networkName = new StringWithByteString(name, nameBytes); return this; } @@ -679,7 +733,19 @@ public Builder setNetworkName(String name) { * @return DatanodeDetails.Builder */ public Builder setNetworkLocation(String loc) { - this.networkLocation = loc; + return setNetworkLocation(loc, null); + } + + public Builder setNetworkLocation(String loc, ByteString locBytes) { + final String normalized = NetUtils.normalize(loc); + this.networkLocation = normalized.equals(loc) && locBytes != null + ? new StringWithByteString(normalized, locBytes) + : StringWithByteString.valueOf(normalized); + return this; + } + + public Builder setLevel(int level) { + this.level = level; return this; } @@ -797,17 +863,10 @@ public Builder setCurrentVersion(int v) { */ public DatanodeDetails build() { Preconditions.checkNotNull(id); - if (networkLocation == null) { - networkLocation = NetConstants.DEFAULT_RACK; + if (networkLocation == null || networkLocation.getString().isEmpty()) { + networkLocation = NetConstants.BYTE_STRING_DEFAULT_RACK; } - DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId, version, setupTime, revision, - buildDate, persistedOpState, persistedOpStateExpiryEpochSec, - initialVersion, currentVersion); - if (networkName != null) { - dn.setNetworkName(networkName); - } - return dn; + return new DatanodeDetails(this); } } @@ -854,9 +913,6 @@ public enum Name { /** * Private constructor for constructing Port object. Use * DatanodeDetails#newPort to create a new Port object. 
- * - * @param name - * @param value */ private Port(Name name, Integer value) { this.name = name; @@ -1011,4 +1067,12 @@ public String getBuildDate() { public void setBuildDate(String date) { this.buildDate = date; } + + @Override + public HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + return HddsProtos.NetworkNode.newBuilder() + .setDatanodeDetails(toProtoBuilder(clientVersion).build()) + .build(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index cb7f6f8a3b31..5288c0bf50bb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -61,6 +61,7 @@ import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.RoutingTable; +import org.apache.ratis.retry.RetryPolicies; import org.apache.ratis.retry.RetryPolicy; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; @@ -234,8 +235,8 @@ public static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, private static RpcType getRpcType(ConfigurationSource conf) { return SupportedRpcType.valueOfIgnoreCase(conf.get( - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); } public static BiFunction newRaftClient( @@ -244,6 +245,12 @@ public static BiFunction newRaftClient( RatisHelper.createRetryPolicy(conf), tlsConfig, conf); } + public static BiFunction newRaftClientNoRetry( + ConfigurationSource conf) { + return (leader, tlsConfig) -> newRaftClient(getRpcType(conf), leader, + RetryPolicies.noRetry(), tlsConfig, conf); + } + public static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig, ConfigurationSource configuration) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 42a74dd12c2e..dbbfa9923e8a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -41,95 +41,95 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_DB_DIRS_PERMISSIONS = "ozone.scm.db.dirs.permissions"; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = "dfs.container.ratis.enabled"; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY + = "hdds.container.ratis.enabled"; + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = "dfs.container.ratis.rpc.type"; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY + = "hdds.container.ratis.rpc.type"; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME - = "dfs.container.ratis.num.write.chunk.threads.per.volume"; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME + = "hdds.container.ratis.num.write.chunk.threads.per.volume"; 
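Existing deployments that still set the old dfs.* names keep working because of the DeprecationDelta table registered by OzoneConfiguration (see the addDeprecatedKeys() hunk above); new code should use the hdds.* constants. A small sketch of the fallback behaviour:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class RpcTypeCheck {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // A value supplied under the deprecated dfs.* name ...
    conf.set("dfs.container.ratis.rpc.type", "GRPC");
    // ... is still visible through the renamed hdds.* key.
    String rpcType = conf.get(
        ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY,
        ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
    System.out.println(rpcType); // GRPC
  }
}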
public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = "dfs.container.ratis.replication.level"; + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = "hdds.container.ratis.replication.level"; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = "dfs.container.ratis.num.container.op.executors"; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = "hdds.container.ratis.num.container.op.executors"; + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = - "dfs.container.ratis.segment.size"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = + "hdds.container.ratis.segment.size"; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = "64MB"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = - "dfs.container.ratis.segment.preallocated.size"; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = + "hdds.container.ratis.segment.preallocated.size"; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - "dfs.container.ratis.statemachinedata.sync.timeout"; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + "hdds.container.ratis.statemachinedata.sync.timeout"; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = TimeDuration.valueOf(10, TimeUnit.SECONDS); public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - "dfs.container.ratis.statemachinedata.sync.retries"; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + "hdds.container.ratis.statemachinedata.sync.retries"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = - "dfs.container.ratis.statemachine.max.pending.apply-transactions"; + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = + "hdds.container.ratis.statemachine.max.pending.apply-transactions"; // The default value of maximum number of pending state machine apply // transactions is kept same as default snapshot threshold. 
public static final int - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = 100000; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.queue.num-elements"; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + "hdds.container.ratis.log.queue.num-elements"; + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.queue.byte-limit"; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + "hdds.container.ratis.log.queue.byte-limit"; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = "4GB"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.appender.queue.num-elements"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + "hdds.container.ratis.log.appender.queue.num-elements"; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.appender.queue.byte-limit"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + "hdds.container.ratis.log.appender.queue.byte-limit"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - "dfs.container.ratis.log.purge.gap"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = + "hdds.container.ratis.log.purge.gap"; // TODO: Set to 1024 once RATIS issue around purge is fixed. 
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - "dfs.container.ratis.leader.pending.bytes.limit"; + public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = + "hdds.container.ratis.leader.pending.bytes.limit"; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - "dfs.ratis.server.retry-cache.timeout.duration"; + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + "hdds.ratis.server.retry-cache.timeout.duration"; public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - "dfs.ratis.leader.election.minimum.timeout.duration"; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + "hdds.ratis.leader.election.minimum.timeout.duration"; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(5, TimeUnit.SECONDS); - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - "dfs.ratis.snapshot.threshold"; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = + "hdds.ratis.snapshot.threshold"; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; @@ -146,8 +146,8 @@ public final class ScmConfigKeys { "32KB"; public static final String OZONE_CHUNK_LIST_INCREMENTAL = - "ozone.chunk.list.incremental"; - public static final boolean OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT = false; + "ozone.incremental.chunk.list"; + public static final boolean OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT = true; public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY = "ozone.scm.container.layout"; @@ -227,17 +227,7 @@ public final class ScmConfigKeys { "hdds.datanode.dir.du.reserved"; public static final String HDDS_DATANODE_DIR_DU_RESERVED_PERCENT = "hdds.datanode.dir.du.reserved.percent"; - public static final float HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT = 0; - public static final String HDDS_REST_CSRF_ENABLED_KEY = - "hdds.rest.rest-csrf.enabled"; - public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false; - public static final String HDDS_REST_NETTY_HIGH_WATERMARK = - "hdds.rest.netty.high.watermark"; - public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536; - public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768; - public static final String HDDS_REST_NETTY_LOW_WATERMARK = - "hdds.rest.netty.low.watermark"; - + public static final float HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT = 0.0001f; public static final String OZONE_SCM_HANDLER_COUNT_KEY = "ozone.scm.handler.count.key"; public static final String OZONE_SCM_CLIENT_HANDLER_COUNT_KEY = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java index b9d823e8d817..19c39698dec7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -26,9 +27,9 @@ * contains clusterId and the SCM Id. */ public final class ScmInfo { - private String clusterId; - private String scmId; - private List peerRoles; + private final String clusterId; + private final String scmId; + private final List peerRoles; /** * Builder for ScmInfo. @@ -36,7 +37,7 @@ public final class ScmInfo { public static class Builder { private String clusterId; private String scmId; - private List peerRoles; + private final List peerRoles; public Builder() { peerRoles = new ArrayList<>(); @@ -80,7 +81,7 @@ public ScmInfo build() { private ScmInfo(String clusterId, String scmId, List peerRoles) { this.clusterId = clusterId; this.scmId = scmId; - this.peerRoles = peerRoles; + this.peerRoles = Collections.unmodifiableList(peerRoles); } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java index b6834aba1eb1..61fd0d8f033d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerCommandResponseProto; +import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -60,7 +61,7 @@ public void setLogIndex(long logIndex) { } public List getDatanodes() { - return datanodes; + return Collections.unmodifiableList(datanodes); } public void addDatanode(DatanodeDetails dn) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 402398e36c3f..14fb0a40cd00 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -217,11 +217,12 @@ List queryNode(HddsProtos.NodeOperationalState opState, * Allows a list of hosts to be decommissioned. The hosts are identified * by their hostname and optionally port in the format foo.com:port. * @param hosts A list of hostnames, optionally with port + * @param force true to forcefully decommission Datanodes * @throws IOException * @return A list of DatanodeAdminError for any hosts which failed to * decommission */ - List decommissionNodes(List hosts) + List decommissionNodes(List hosts, boolean force) throws IOException; /** @@ -251,7 +252,7 @@ List recommissionNodes(List hosts) * @throws IOException */ List startMaintenanceNodes(List hosts, - int endHours) throws IOException; + int endHours, boolean force) throws IOException; /** * Creates a specified replication pipeline. @@ -356,13 +357,20 @@ Map> getSafeModeRuleStatuses() /** * Start ContainerBalancer. 
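On the client side, the admin calls touched in this ScmClient hunk look roughly as follows; `client` stands for any ScmClient implementation and is an assumption of the sketch. Passing Optional.empty() for the balancer parameters keeps the server-side defaults.

import java.io.IOException;
import java.util.Collections;
import java.util.Optional;
import org.apache.hadoop.hdds.scm.client.ScmClient;

public final class AdminCalls {
  private AdminCalls() {
  }

  public static void run(ScmClient client) throws IOException {
    // Decommission now takes an explicit force flag.
    client.decommissionNodes(Collections.singletonList("dn1.example.com:9858"), false);

    // Twelve optional knobs in total: the original six plus balancingInterval,
    // moveTimeout, moveReplicationTimeout, networkTopologyEnable, includeNodes
    // and excludeNodes.
    client.startContainerBalancer(
        Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
  }
}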
*/ + @SuppressWarnings("checkstyle:parameternumber") StartContainerBalancerResponseProto startContainerBalancer( Optional threshold, Optional iterations, Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException; + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException; /** * Stop ContainerBalancer. @@ -452,4 +460,6 @@ StatusAndMessages queryUpgradeFinalizationProgress( DecommissionScmResponseProto decommissionScm( String scmId) throws IOException; + + String getMetrics(String query) throws IOException; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index b11428581e7b..6bf2d5500c88 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -90,31 +90,19 @@ public static Codec getCodec() { // container replica should have the same sequenceId. private long sequenceId; - @SuppressWarnings("parameternumber") - private ContainerInfo( - long containerID, - HddsProtos.LifeCycleState state, - PipelineID pipelineID, - long usedBytes, - long numberOfKeys, - long stateEnterTime, - String owner, - long deleteTransactionId, - long sequenceId, - ReplicationConfig repConfig, - Clock clock) { - this.containerID = ContainerID.valueOf(containerID); - this.pipelineID = pipelineID; - this.usedBytes = usedBytes; - this.numberOfKeys = numberOfKeys; - this.lastUsed = clock.instant(); - this.state = state; - this.stateEnterTime = Instant.ofEpochMilli(stateEnterTime); - this.owner = owner; - this.deleteTransactionId = deleteTransactionId; - this.sequenceId = sequenceId; - this.replicationConfig = repConfig; - this.clock = clock; + private ContainerInfo(Builder b) { + containerID = ContainerID.valueOf(b.containerID); + pipelineID = b.pipelineID; + usedBytes = b.used; + numberOfKeys = b.keys; + lastUsed = b.clock.instant(); + state = b.state; + stateEnterTime = Instant.ofEpochMilli(b.stateEnterTime); + owner = b.owner; + deleteTransactionId = b.deleteTransactionId; + sequenceId = b.sequenceId; + replicationConfig = b.replicationConfig; + clock = b.clock; } public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) { @@ -445,9 +433,7 @@ public Builder setClock(Clock clock) { } public ContainerInfo build() { - return new ContainerInfo(containerID, state, pipelineID, - used, keys, stateEnterTime, owner, deleteTransactionId, - sequenceId, replicationConfig, clock); + return new ContainerInfo(this); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java index 7ac0401af117..5a1d8f90ea84 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java @@ -26,8 +26,8 @@ * contains a Pipeline and the key. 
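With the new newBuilder()/toBuilder() accessors, a caller can derive a modified copy of an AllocatedBlock without mutating the original, whose fields are now final. Minimal sketch; `newPipeline` is an assumed input:

import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

public final class AllocatedBlocks {
  private AllocatedBlocks() {
  }

  // Same container/block ID, different pipeline.
  public static AllocatedBlock withPipeline(AllocatedBlock block, Pipeline newPipeline) {
    return block.toBuilder()
        .setPipeline(newPipeline)
        .build();
  }
}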
*/ public final class AllocatedBlock { - private Pipeline pipeline; - private ContainerBlockID containerBlockID; + private final Pipeline pipeline; + private final ContainerBlockID containerBlockID; /** * Builder for AllocatedBlock. @@ -63,4 +63,14 @@ public Pipeline getPipeline() { public ContainerBlockID getBlockID() { return containerBlockID; } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder() + .setContainerBlockID(containerBlockID) + .setPipeline(pipeline); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java index 2577a1e5ea2a..258c0be89612 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java @@ -25,6 +25,7 @@ import java.time.Clock; import java.time.ZoneOffset; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.Map; @@ -38,28 +39,24 @@ */ public class ExcludeList { - private final Map datanodes; - private final Set containerIds; - private final Set pipelineIds; + private final Map datanodes = new ConcurrentHashMap<>(); + private final Set containerIds = new HashSet<>(); + private final Set pipelineIds = new HashSet<>(); private long expiryTime = 0; - private java.time.Clock clock; + private final Clock clock; public ExcludeList() { - datanodes = new ConcurrentHashMap<>(); - containerIds = new HashSet<>(); - pipelineIds = new HashSet<>(); clock = Clock.system(ZoneOffset.UTC); } - public ExcludeList(long autoExpiryTime, java.time.Clock clock) { - this(); + public ExcludeList(long autoExpiryTime, Clock clock) { this.expiryTime = autoExpiryTime; this.clock = clock; } public Set getContainerIds() { - return containerIds; + return Collections.unmodifiableSet(containerIds); } public Set getDatanodes() { @@ -99,7 +96,7 @@ public void addPipeline(PipelineID pipelineId) { } public Set getPipelineIds() { - return pipelineIds; + return Collections.unmodifiableSet(pipelineIds); } public HddsProtos.ExcludeListProto getProtoBuf() { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index 80e09af172b6..af4e72993839 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -65,7 +65,7 @@ public final class SCMHAUtils { public static final Logger LOG = LoggerFactory.getLogger(SCMHAUtils.class); - private static final List> + private static final ImmutableList> RETRIABLE_WITH_NO_FAILOVER_EXCEPTION_LIST = ImmutableList.>builder() .add(LeaderNotReadyException.class) @@ -74,7 +74,7 @@ public final class SCMHAUtils { .add(ResourceUnavailableException.class) .build(); - private static final List> + private static final ImmutableList> NON_RETRIABLE_EXCEPTION_LIST = ImmutableList.>builder() .add(SCMException.class) @@ -316,7 +316,7 @@ public static Throwable getExceptionForClass(Exception e, return null; } - public static List> getRetriableWithNoFailoverExceptionList() { return RETRIABLE_WITH_NO_FAILOVER_EXCEPTION_LIST; } diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java index c87d826d2529..6074e7da0afc 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java @@ -20,6 +20,8 @@ import java.util.Collection; import java.util.List; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + /** * The interface defines an inner node in a network topology. * An inner node represents network topology entities, such as data center, @@ -89,4 +91,16 @@ N newInnerNode(String name, String location, InnerNode parent, int level, */ Node getLeaf(int leafIndex, List excludedScopes, Collection excludedNodes, int ancestorGen); + + @Override + HddsProtos.NetworkNode toProtobuf(int clientVersion); + + boolean equals(Object o); + + int hashCode(); + + static InnerNode fromProtobuf( + HddsProtos.InnerNode innerNode) { + return InnerNodeImpl.fromProtobuf(innerNode); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index f2648f3d294c..332dddac25c9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -27,6 +27,7 @@ import java.util.Map; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,10 +48,10 @@ public InnerNodeImpl newInnerNode(String name, String location, } } - static final Factory FACTORY = new Factory(); + public static final Factory FACTORY = new Factory(); // a map of node's network name to Node for quick search and keep // the insert order - private final HashMap childrenMap = + private HashMap childrenMap = new LinkedHashMap(); // number of descendant leaves under this node private int numOfLeaves; @@ -66,6 +67,76 @@ protected InnerNodeImpl(String name, String location, InnerNode parent, super(name, location, parent, level, cost); } + /** + * Construct an InnerNode from its name, network location, level, cost, + * childrenMap and number of leaves. This constructor is used as part of + * protobuf deserialization. + */ + protected InnerNodeImpl(String name, String location, int level, int cost, + HashMap childrenMap, int numOfLeaves) { + super(name, location, null, level, cost); + this.childrenMap = childrenMap; + this.numOfLeaves = numOfLeaves; + } + + /** + * InnerNodeImpl Builder to help construct an InnerNodeImpl object from + * protobuf objects. 
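Taken together with InnerNode.fromProtobuf and the toProtobuf override further down in this file, the builder enables a straightforward round trip of a topology subtree. A minimal sketch, with `innerNode` and `clientVersion` as assumed inputs:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.net.InnerNode;

public final class TopologyCodec {
  private TopologyCodec() {
  }

  public static InnerNode roundTrip(InnerNode innerNode, int clientVersion) {
    // Serialize the subtree (children map and leaf count included) ...
    HddsProtos.NetworkNode wire = innerNode.toProtobuf(clientVersion);
    // ... and rebuild an InnerNode from the proto message.
    return InnerNode.fromProtobuf(wire.getInnerNode());
  }
}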
+ */ + public static class Builder { + private String name; + private String location; + private int cost; + private int level; + private HashMap childrenMap = new LinkedHashMap<>(); + private int numOfLeaves; + + public Builder setName(String name) { + this.name = name; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setCost(int cost) { + this.cost = cost; + return this; + } + + public Builder setLevel(int level) { + this.level = level; + return this; + } + + public Builder setChildrenMap( + List childrenMapList) { + HashMap newChildrenMap = new LinkedHashMap<>(); + for (HddsProtos.ChildrenMap childrenMapProto : + childrenMapList) { + String networkName = childrenMapProto.hasNetworkName() ? + childrenMapProto.getNetworkName() : null; + Node node = childrenMapProto.hasNetworkNode() ? + Node.fromProtobuf(childrenMapProto.getNetworkNode()) : null; + newChildrenMap.put(networkName, node); + } + this.childrenMap = newChildrenMap; + return this; + } + + public Builder setNumOfLeaves(int numOfLeaves) { + this.numOfLeaves = numOfLeaves; + return this; + } + + public InnerNodeImpl build() { + return new InnerNodeImpl(name, location, level, cost, childrenMap, + numOfLeaves); + } + } + /** @return the number of children this node has */ private int getNumOfChildren() { return childrenMap.size(); @@ -77,6 +148,11 @@ public int getNumOfLeaves() { return numOfLeaves; } + /** @return a map of node's network name to Node. */ + public HashMap getChildrenMap() { + return childrenMap; + } + /** * @return number of its all nodes at level level. Here level is a * relative level. If level is 1, means node itself. If level is 2, means its @@ -390,14 +466,83 @@ public Node getLeaf(int leafIndex, List excludedScopes, } @Override - public boolean equals(Object to) { - if (to == null) { - return false; + public HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + + HddsProtos.InnerNode.Builder innerNode = + HddsProtos.InnerNode.newBuilder() + .setNumOfLeaves(numOfLeaves) + .setNodeTopology( + NodeImpl.toProtobuf(getNetworkName(), getNetworkLocation(), + getLevel(), getCost())); + + if (childrenMap != null && !childrenMap.isEmpty()) { + for (Map.Entry entry : childrenMap.entrySet()) { + if (entry.getValue() != null) { + HddsProtos.ChildrenMap childrenMapProto = + HddsProtos.ChildrenMap.newBuilder() + .setNetworkName(entry.getKey()) + .setNetworkNode(entry.getValue().toProtobuf(clientVersion)) + .build(); + innerNode.addChildrenMap(childrenMapProto); + } + } + } + innerNode.build(); + + HddsProtos.NetworkNode networkNode = + HddsProtos.NetworkNode.newBuilder() + .setInnerNode(innerNode).build(); + + return networkNode; + } + + public static InnerNode fromProtobuf(HddsProtos.InnerNode innerNode) { + InnerNodeImpl.Builder builder = new InnerNodeImpl.Builder(); + + if (innerNode.hasNodeTopology()) { + HddsProtos.NodeTopology nodeTopology = innerNode.getNodeTopology(); + + if (nodeTopology.hasName()) { + builder.setName(nodeTopology.getName()); + } + if (nodeTopology.hasLocation()) { + builder.setLocation(nodeTopology.getLocation()); + } + if (nodeTopology.hasLevel()) { + builder.setLevel(nodeTopology.getLevel()); + } + if (nodeTopology.hasCost()) { + builder.setCost(nodeTopology.getCost()); + } + } + + if (!innerNode.getChildrenMapList().isEmpty()) { + builder.setChildrenMap(innerNode.getChildrenMapList()); + } + if (innerNode.hasNumOfLeaves()) { + builder.setNumOfLeaves(innerNode.getNumOfLeaves()); } - if (this == to) { + + 
return builder.build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { return true; } - return this.toString().equals(to.toString()); + if (o == null || getClass() != o.getClass()) { + return false; + } + InnerNodeImpl innerNode = (InnerNodeImpl) o; + return this.getNetworkName().equals(innerNode.getNetworkName()) && + this.getNetworkLocation().equals(innerNode.getNetworkLocation()) && + this.getLevel() == innerNode.getLevel() && + this.getCost() == innerNode.getCost() && + this.numOfLeaves == innerNode.numOfLeaves && + this.childrenMap.size() == innerNode.childrenMap.size() && + this.childrenMap.equals(innerNode.childrenMap); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java index 8ee6decc9c4d..bd1aa71ebd72 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.net; import org.apache.hadoop.hdds.scm.net.NodeSchema.LayerType; +import org.apache.hadoop.util.StringWithByteString; /** * Class to hold network topology related constants and configurations. @@ -32,11 +33,13 @@ private NetConstants() { public static final String SCOPE_REVERSE_STR = "~"; /** string representation of root. */ public static final String ROOT = ""; + public static final StringWithByteString BYTE_STRING_ROOT = StringWithByteString.valueOf(ROOT); public static final int INNER_NODE_COST_DEFAULT = 1; public static final int NODE_COST_DEFAULT = 0; public static final int ANCESTOR_GENERATION_DEFAULT = 0; public static final int ROOT_LEVEL = 1; public static final String DEFAULT_RACK = "/default-rack"; + public static final StringWithByteString BYTE_STRING_DEFAULT_RACK = StringWithByteString.valueOf(DEFAULT_RACK); public static final String DEFAULT_NODEGROUP = "/default-nodegroup"; public static final String DEFAULT_DATACENTER = "/default-datacenter"; public static final String DEFAULT_REGION = "/default-dataregion"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 2dc86c1b6856..1f3d0f02e6de 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -30,6 +30,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.NavigableMap; +import java.util.Objects; import java.util.TreeMap; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.locks.ReadWriteLock; @@ -75,6 +76,15 @@ public NetworkTopologyImpl(ConfigurationSource conf) { schemaManager.getCost(NetConstants.ROOT_LEVEL)); } + public NetworkTopologyImpl(String schemaFile, InnerNode clusterTree) { + schemaManager = NodeSchemaManager.getInstance(); + schemaManager.init(schemaFile); + maxLevel = schemaManager.getMaxLevel(); + shuffleOperation = Collections::shuffle; + factory = InnerNodeImpl.FACTORY; + this.clusterTree = clusterTree; + } + @VisibleForTesting public NetworkTopologyImpl(NodeSchemaManager manager, Consumer> shuffleOperation) { @@ -223,10 +233,10 @@ public boolean contains(Node node) { private boolean containsNode(Node node) { Node parent = node.getParent(); - while (parent != null && parent != 
clusterTree) { + while (parent != null && !Objects.equals(parent, clusterTree)) { parent = parent.getParent(); } - return parent == clusterTree; + return Objects.equals(parent, clusterTree); } /** @@ -240,7 +250,9 @@ public boolean isSameAncestor(Node node1, Node node2, int ancestorGen) { } netlock.readLock().lock(); try { - return node1.getAncestor(ancestorGen) == node2.getAncestor(ancestorGen); + Node ancestor1 = node1.getAncestor(ancestorGen); + Node ancestor2 = node2.getAncestor(ancestorGen); + return Objects.equals(ancestor1, ancestor2); } finally { netlock.readLock().unlock(); } @@ -259,7 +271,7 @@ public boolean isSameParent(Node node1, Node node2) { try { node1 = node1.getParent(); node2 = node2.getParent(); - return node1 == node2; + return Objects.equals(node1, node2); } finally { netlock.readLock().unlock(); } @@ -704,8 +716,7 @@ private Node chooseNodeInternal(String scope, int leafIndex, */ @Override public int getDistanceCost(Node node1, Node node2) { - if ((node1 != null && node1.equals(node2)) || - (node1 == null && node2 == null)) { + if (Objects.equals(node1, node2)) { return 0; } if (node1 == null || node2 == null) { @@ -726,8 +737,10 @@ public int getDistanceCost(Node node1, Node node2) { int cost = 0; netlock.readLock().lock(); try { - if ((node1.getAncestor(level1 - 1) != clusterTree) || - (node2.getAncestor(level2 - 1) != clusterTree)) { + Node ancestor1 = node1.getAncestor(level1 - 1); + Node ancestor2 = node2.getAncestor(level2 - 1); + if (!Objects.equals(ancestor1, clusterTree) || + !Objects.equals(ancestor2, clusterTree)) { LOG.debug("One of the nodes is outside of network topology"); return Integer.MAX_VALUE; } @@ -741,7 +754,7 @@ public int getDistanceCost(Node node1, Node node2) { level2--; cost += node2 == null ? 0 : node2.getCost(); } - while (node1 != null && node2 != null && node1 != node2) { + while (node1 != null && node2 != null && !Objects.equals(node1, node2)) { node1 = node1.getParent(); node2 = node2.getParent(); cost += node1 == null ? 0 : node1.getCost(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java index 9884888a1dd4..50f702cce08e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdds.scm.net; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + /** * The interface defines a node in a network topology. 
* A node may be a leave representing a data node or an inner @@ -126,4 +129,21 @@ public interface Node { * @return true if this node is under a specific scope */ boolean isDescendant(String nodePath); + + default HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + return null; + } + + static Node fromProtobuf( + HddsProtos.NetworkNode networkNode) { + if (networkNode.hasDatanodeDetails()) { + return DatanodeDetails.getFromProtoBuf( + networkNode.getDatanodeDetails()); + } else if (networkNode.hasInnerNode()) { + return InnerNode.fromProtobuf(networkNode.getInnerNode()); + } else { + return null; + } + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java index e7a45f649b6e..f5f6cec099b2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java @@ -18,8 +18,10 @@ package org.apache.hadoop.hdds.scm.net; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.util.StringWithByteString; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; +import static org.apache.hadoop.hdds.scm.net.NetConstants.BYTE_STRING_ROOT; import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; /** @@ -27,9 +29,9 @@ */ public class NodeImpl implements Node { // host:port# - private String name; + private StringWithByteString name; // string representation of this node's location, such as /dc1/rack1 - private String location; + private StringWithByteString location; // location + "/" + name private String path; // which level of the tree the node resides, start from 1 for root @@ -45,18 +47,22 @@ public class NodeImpl implements Node { * {@link NetConstants#PATH_SEPARATOR}) * @param location this node's location */ - public NodeImpl(String name, String location, int cost) { - if (name != null && name.contains(PATH_SEPARATOR_STR)) { + public NodeImpl(StringWithByteString name, StringWithByteString location, int cost) { + if (name != null && name.getString().contains(PATH_SEPARATOR_STR)) { throw new IllegalArgumentException( "Network location name:" + name + " should not contain " + PATH_SEPARATOR_STR); } - this.name = (name == null) ? ROOT : name; - this.location = NetUtils.normalize(location); + this.name = name == null ? BYTE_STRING_ROOT : name; + this.location = location; this.path = getPath(); this.cost = cost; } + public NodeImpl(String name, String location, int cost) { + this(StringWithByteString.valueOf(name), StringWithByteString.valueOf(NetUtils.normalize(location)), cost); + } + /** * Construct a node from its name and its location. 
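The switch to StringWithByteString is what lets NodeImpl (and DatanodeDetails above) hand protobuf a cached ByteString instead of re-encoding the name and location on every serialization. A sketch of the two views, restricted to the members already used in this patch (valueOf, getString, getBytes):

import com.google.protobuf.ByteString;
import org.apache.hadoop.util.StringWithByteString;

public final class NameViews {
  private NameViews() {
  }

  public static void main(String[] args) {
    StringWithByteString host = StringWithByteString.valueOf("dn-1.example.com");
    String text = host.getString();     // plain String view, e.g. for logging
    ByteString bytes = host.getBytes(); // cached ByteString view for proto builders
    System.out.println(text + " -> " + bytes.size() + " bytes");
  }
}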
* @@ -74,11 +80,25 @@ public NodeImpl(String name, String location, InnerNode parent, int level, this.level = level; } + public NodeImpl(StringWithByteString name, StringWithByteString location, InnerNode parent, int level, + int cost) { + this(name, location, cost); + this.parent = parent; + this.level = level; + } + /** * @return this node's name */ @Override public String getNetworkName() { + return name.getString(); + } + + /** + * @return this node's name + */ + public StringWithByteString getNetworkNameAsByteString() { return name; } @@ -88,6 +108,15 @@ public String getNetworkName() { */ @Override public void setNetworkName(String networkName) { + this.name = StringWithByteString.valueOf(networkName); + this.path = getPath(); + } + + /** + * Set this node's name, can be hostname or Ipaddress. + * @param networkName it's network name + */ + public void setNetworkName(StringWithByteString networkName) { this.name = networkName; this.path = getPath(); } @@ -97,6 +126,13 @@ public void setNetworkName(String networkName) { */ @Override public String getNetworkLocation() { + return location.getString(); + } + + /** + * @return this node's network location + */ + public StringWithByteString getNetworkLocationAsByteString() { return location; } @@ -106,7 +142,7 @@ public String getNetworkLocation() { */ @Override public void setNetworkLocation(String networkLocation) { - this.location = networkLocation; + this.location = StringWithByteString.valueOf(networkLocation); this.path = getPath(); } @@ -229,6 +265,20 @@ public boolean isDescendant(String nodePath) { NetUtils.addSuffix(nodePath)); } + public static HddsProtos.NodeTopology toProtobuf(String name, String location, + int level, int cost) { + + HddsProtos.NodeTopology.Builder nodeTopologyBuilder = + HddsProtos.NodeTopology.newBuilder() + .setName(name) + .setLocation(location) + .setLevel(level) + .setCost(cost); + + HddsProtos.NodeTopology nodeTopology = nodeTopologyBuilder.build(); + return nodeTopology; + } + @Override public boolean equals(Object to) { if (to == null) { @@ -254,8 +304,8 @@ public String toString() { } private String getPath() { - return this.location.equals(PATH_SEPARATOR_STR) ? - this.location + this.name : + return this.location.getString().equals(PATH_SEPARATOR_STR) ? 
+ this.location + this.name.getString() : this.location + PATH_SEPARATOR_STR + this.name; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java index eecd79876720..fb37b214cad1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java @@ -62,6 +62,14 @@ public void init(ConfigurationSource conf) { String schemaFile = conf.get( ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT); + loadSchemaFile(schemaFile); + } + + public void init(String schemaFile) { + loadSchemaFile(schemaFile); + } + + private void loadSchemaFile(String schemaFile) { NodeSchemaLoadResult result; try { result = NodeSchemaLoader.getInstance().loadSchemaFromFile(schemaFile); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 9d95cee48366..6ea92f74c193 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -23,7 +23,6 @@ import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; @@ -34,6 +33,8 @@ import java.util.UUID; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; @@ -76,10 +77,10 @@ public static Codec getCodec() { private final ReplicationConfig replicationConfig; private final PipelineState state; - private Map nodeStatus; - private Map replicaIndexes; + private final Map nodeStatus; + private final Map replicaIndexes; // nodes with ordered distance to client - private List nodesInOrder = new ArrayList<>(); + private final ImmutableList nodesInOrder; // Current reported Leader for the pipeline private UUID leaderId; // Timestamp for pipeline upon creation @@ -103,17 +104,17 @@ public static Codec getCodec() { * set to Instant.now when you crate the Pipeline object as part of * state change. */ - private Pipeline(PipelineID id, - ReplicationConfig replicationConfig, PipelineState state, - Map nodeStatus, UUID suggestedLeaderId) { - this.id = id; - this.replicationConfig = replicationConfig; - this.state = state; - this.nodeStatus = nodeStatus; - this.creationTimestamp = Instant.now(); - this.suggestedLeaderId = suggestedLeaderId; - this.replicaIndexes = new HashMap<>(); - this.stateEnterTime = Instant.now(); + private Pipeline(Builder b) { + id = b.id; + replicationConfig = b.replicationConfig; + state = b.state; + leaderId = b.leaderId; + suggestedLeaderId = b.suggestedLeaderId; + nodeStatus = b.nodeStatus; + nodesInOrder = b.nodesInOrder != null ? ImmutableList.copyOf(b.nodesInOrder) : ImmutableList.of(); + replicaIndexes = b.replicaIndexes; + creationTimestamp = b.creationTimestamp != null ? 
b.creationTimestamp : Instant.now(); + stateEnterTime = Instant.now(); } /** @@ -310,19 +311,6 @@ public boolean isOpen() { return state == PipelineState.OPEN; } - public boolean isAllocationTimeout() { - //TODO: define a system property to control the timeout value - return false; - } - - public void setNodesInOrder(List nodes) { - nodesInOrder.clear(); - if (null == nodes) { - return; - } - nodesInOrder.addAll(nodes); - } - public List getNodesInOrder() { if (nodesInOrder.isEmpty()) { LOG.debug("Nodes in order is empty, delegate to getNodes"); @@ -406,33 +394,39 @@ public HddsProtos.Pipeline getProtobufMessage(int clientVersion) // To save the message size on wire, only transfer the node order based on // network topology - List nodes = nodesInOrder; - if (!nodes.isEmpty()) { - for (int i = 0; i < nodes.size(); i++) { + if (!nodesInOrder.isEmpty()) { + for (int i = 0; i < nodesInOrder.size(); i++) { Iterator it = nodeStatus.keySet().iterator(); for (int j = 0; j < nodeStatus.keySet().size(); j++) { - if (it.next().equals(nodes.get(i))) { + if (it.next().equals(nodesInOrder.get(i))) { builder.addMemberOrders(j); break; } } } if (LOG.isDebugEnabled()) { - LOG.debug("Serialize pipeline {} with nodesInOrder {}", id, nodes); + LOG.debug("Serialize pipeline {} with nodesInOrder {}", id, nodesInOrder); } } return builder.build(); } - static Pipeline getFromProtobufSetCreationTimestamp( + private static Pipeline getFromProtobufSetCreationTimestamp( HddsProtos.Pipeline proto) throws UnknownPipelineStateException { - final Pipeline pipeline = getFromProtobuf(proto); - // When SCM is restarted, set Creation time with current time. - pipeline.setCreationTimestamp(Instant.now()); - return pipeline; + return toBuilder(proto) + .setCreateTimestamp(Instant.now()) + .build(); } - public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) + public Pipeline copyWithNodesInOrder(List nodes) { + return toBuilder().setNodesInOrder(nodes).build(); + } + + public Builder toBuilder() { + return newBuilder(this); + } + + public static Builder toBuilder(HddsProtos.Pipeline pipeline) throws UnknownPipelineStateException { Preconditions.checkNotNull(pipeline, "Pipeline is null"); @@ -473,9 +467,13 @@ public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) .setReplicaIndexes(nodes) .setLeaderId(leaderId) .setSuggestedLeaderId(suggestedLeaderId) - .setNodesInOrder(pipeline.getMemberOrdersList()) - .setCreateTimestamp(pipeline.getCreationTimeStamp()) - .build(); + .setNodeOrder(pipeline.getMemberOrdersList()) + .setCreateTimestamp(pipeline.getCreationTimeStamp()); + } + + public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) + throws UnknownPipelineStateException { + return toBuilder(pipeline).build(); } @Override @@ -529,10 +527,6 @@ public static Builder newBuilder(Pipeline pipeline) { return new Builder(pipeline); } - private void setReplicaIndexes(Map replicaIndexes) { - this.replicaIndexes = replicaIndexes; - } - /** * Builder class for Pipeline. 
*/ @@ -546,7 +540,7 @@ public static class Builder { private UUID leaderId = null; private Instant creationTimestamp = null; private UUID suggestedLeaderId = null; - private Map replicaIndexes = new HashMap<>(); + private Map replicaIndexes = ImmutableMap.of(); public Builder() { } @@ -559,14 +553,15 @@ public Builder(Pipeline pipeline) { this.leaderId = pipeline.getLeaderId(); this.creationTimestamp = pipeline.getCreationTimestamp(); this.suggestedLeaderId = pipeline.getSuggestedLeaderId(); - this.replicaIndexes = new HashMap<>(); if (nodeStatus != null) { + final ImmutableMap.Builder b = ImmutableMap.builder(); for (DatanodeDetails dn : nodeStatus.keySet()) { int index = pipeline.getReplicaIndex(dn); if (index > 0) { - replicaIndexes.put(dn, index); + b.put(dn, index); } } + replicaIndexes = b.build(); } } @@ -601,8 +596,19 @@ public Builder setNodes(List nodes) { return this; } - public Builder setNodesInOrder(List orders) { - this.nodeOrder = orders; + public Builder setNodeOrder(List orders) { + // for build from ProtoBuf + this.nodeOrder = Collections.unmodifiableList(orders); + return this; + } + + public Builder setNodesInOrder(List nodes) { + this.nodesInOrder = new LinkedList<>(nodes); + return this; + } + + public Builder setCreateTimestamp(Instant instant) { + this.creationTimestamp = instant; return this; } @@ -618,7 +624,7 @@ public Builder setSuggestedLeaderId(UUID uuid) { public Builder setReplicaIndexes(Map indexes) { - this.replicaIndexes = indexes; + this.replicaIndexes = indexes == null ? ImmutableMap.of() : ImmutableMap.copyOf(indexes); return this; } @@ -627,19 +633,8 @@ public Pipeline build() { Preconditions.checkNotNull(replicationConfig); Preconditions.checkNotNull(state); Preconditions.checkNotNull(nodeStatus); - Pipeline pipeline = - new Pipeline(id, replicationConfig, state, nodeStatus, - suggestedLeaderId); - pipeline.setLeaderId(leaderId); - // overwrite with original creationTimestamp - if (creationTimestamp != null) { - pipeline.setCreationTimestamp(creationTimestamp); - } - - pipeline.setReplicaIndexes(replicaIndexes); if (nodeOrder != null && !nodeOrder.isEmpty()) { - // This branch is for build from ProtoBuf List nodesWithOrder = new ArrayList<>(); for (int i = 0; i < nodeOrder.size(); i++) { int nodeIndex = nodeOrder.get(i); @@ -657,13 +652,10 @@ public Pipeline build() { LOG.debug("Deserialize nodesInOrder {} in pipeline {}", nodesWithOrder, id); } - pipeline.setNodesInOrder(nodesWithOrder); - } else if (nodesInOrder != null) { - // This branch is for pipeline clone - pipeline.setNodesInOrder(nodesInOrder); + nodesInOrder = nodesWithOrder; } - return pipeline; + return new Pipeline(this); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index e8bddb42cfbd..df8ed02cf7f0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -245,14 +245,14 @@ List queryNode(HddsProtos.NodeOperationalState opState, HddsProtos.Node queryNode(UUID uuid) throws IOException; - List decommissionNodes(List nodes) + List decommissionNodes(List nodes, boolean force) throws IOException; List recommissionNodes(List nodes) throws IOException; List startMaintenanceNodes(List nodes, - int endInHours) throws IOException; + int 
endInHours, boolean force) throws IOException; /** * Close a container. @@ -402,13 +402,20 @@ Map> getSafeModeRuleStatuses() * @return {@link StartContainerBalancerResponseProto} that contains the * start status and an optional message. */ + @SuppressWarnings("checkstyle:parameternumber") StartContainerBalancerResponseProto startContainerBalancer( Optional threshold, Optional iterations, Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException; + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException; /** * Stop ContainerBalancer. @@ -474,4 +481,6 @@ List getListOfContainers( DecommissionScmResponseProto decommissionScm( String scmId) throws IOException; + + String getMetrics(String query) throws IOException; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java index 9acb0e5c33a7..d3f39c023b73 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java @@ -21,6 +21,8 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.function.Function; + +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; @@ -38,10 +40,12 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ListBlockResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion; @@ -210,6 +214,28 @@ public static ContainerCommandResponseProto getPutFileResponseSuccess( .build(); } + /** + * Gets a response for the WriteChunk RPC. + * @param msg - ContainerCommandRequestProto + * @return - ContainerCommandResponseProto + */ + public static ContainerCommandResponseProto getWriteChunkResponseSuccess( + ContainerCommandRequestProto msg, BlockData blockData) { + + WriteChunkResponseProto.Builder writeChunk = + WriteChunkResponseProto.newBuilder(); + if (blockData != null) { + writeChunk.setCommittedBlockLength( + getCommittedBlockLengthResponseBuilder( + blockData.getSize(), blockData.getBlockID())); + + } + return getSuccessResponseBuilder(msg) + .setCmdType(Type.WriteChunk) + .setWriteChunk(writeChunk) + .build(); + } + /** * Gets a response to the read small file call. 
* @param request - Msg @@ -319,6 +345,31 @@ public static ContainerCommandResponseProto getFinalizeBlockResponse( .build(); } + public static ContainerCommandResponseProto getEchoResponse( + ContainerCommandRequestProto msg) { + + ContainerProtos.EchoRequestProto echoRequest = msg.getEcho(); + int responsePayload = echoRequest.getPayloadSizeResp(); + + int sleepTimeMs = echoRequest.getSleepTimeMs(); + try { + if (sleepTimeMs > 0) { + Thread.sleep(sleepTimeMs); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + ContainerProtos.EchoResponseProto.Builder echo = + ContainerProtos.EchoResponseProto + .newBuilder() + .setPayload(UnsafeByteOperations.unsafeWrap(RandomUtils.nextBytes(responsePayload))); + + return getSuccessResponseBuilder(msg) + .setEcho(echo) + .build(); + } + private ContainerCommandResponseBuilders() { throw new UnsupportedOperationException("no instances"); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index c85405566ca5..37cc075f2195 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -29,6 +29,9 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; +import io.opentracing.Scope; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -57,6 +60,8 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.FinalizeBlockRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.EchoRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.EchoResponseProto; import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi.Validator; @@ -65,6 +70,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.security.token.Token; @@ -76,6 +82,7 @@ import org.slf4j.LoggerFactory; import static java.util.Collections.singletonList; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED; /** * Implementation of all container protocol calls performed by Container @@ -128,6 +135,10 @@ public static ListBlockResponseProto listBlock(XceiverClientSpi xceiverClient, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = @@ -146,6 +157,17 @@ static T tryEachDatanode(Pipeline pipeline, try { return 
op.apply(d); } catch (IOException e) { + Span span = GlobalTracer.get().activeSpan(); + if (e instanceof StorageContainerException) { + StorageContainerException sce = (StorageContainerException)e; + // Block token expired. There's no point retrying other DN. + // Throw the exception to request a new block token right away. + if (sce.getResult() == BLOCK_TOKEN_VERIFICATION_FAILED) { + span.log("block token verification failed at DN " + d); + throw e; + } + } + span.log("failed to connect to DN " + d); excluded.add(d); if (excluded.size() < pipeline.size()) { LOG.warn(toErrorMessage.apply(d) @@ -203,6 +225,10 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, List validators, ContainerCommandRequestProto.Builder builder, DatanodeDetails datanode) throws IOException { + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } final ContainerCommandRequestProto request = builder .setDatanodeUuid(datanode.getUuidString()).build(); ContainerCommandResponseProto response = @@ -238,6 +264,10 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = xceiverClient.sendCommand(request, getValidatorList()); @@ -341,10 +371,19 @@ public static ContainerProtos.ReadChunkResponseProto readChunk( builder.setEncodedToken(token.encodeToUrlString()); } - return tryEachDatanode(xceiverClient.getPipeline(), - d -> readChunk(xceiverClient, chunk, blockID, - validators, builder, d), - d -> toErrorMessage(chunk, blockID, d)); + Span span = GlobalTracer.get() + .buildSpan("readChunk").start(); + try (Scope ignored = GlobalTracer.get().activateSpan(span)) { + span.setTag("offset", chunk.getOffset()) + .setTag("length", chunk.getLen()) + .setTag("block", blockID.toString()); + return tryEachDatanode(xceiverClient.getPipeline(), + d -> readChunk(xceiverClient, chunk, blockID, + validators, builder, d), + d -> toErrorMessage(chunk, blockID, d)); + } finally { + span.finish(); + } } private static ContainerProtos.ReadChunkResponseProto readChunk( @@ -352,10 +391,15 @@ private static ContainerProtos.ReadChunkResponseProto readChunk( List validators, ContainerCommandRequestProto.Builder builder, DatanodeDetails d) throws IOException { - final ContainerCommandRequestProto request = builder - .setDatanodeUuid(d.getUuidString()).build(); + ContainerCommandRequestProto.Builder requestBuilder = builder + .setDatanodeUuid(d.getUuidString()); + Span span = GlobalTracer.get().activeSpan(); + String traceId = TracingUtil.exportSpan(span); + if (traceId != null) { + requestBuilder = requestBuilder.setTraceID(traceId); + } ContainerCommandResponseProto reply = - xceiverClient.sendCommand(request, validators); + xceiverClient.sendCommand(requestBuilder.build(), validators); final ReadChunkResponseProto response = reply.getReadChunk(); final long readLen = getLen(response); if (readLen != chunk.getLen()) { @@ -394,8 +438,10 @@ static long getLen(ReadChunkResponseProto response) { */ public static XceiverClientReply writeChunkAsync( XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - ByteString data, String tokenString, int replicationIndex) + ByteString data, String tokenString, + int replicationIndex, BlockData blockData) throws 
IOException, ExecutionException, InterruptedException { + WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto.newBuilder() .setBlockID(DatanodeBlockID.newBuilder() @@ -406,6 +452,12 @@ public static XceiverClientReply writeChunkAsync( .build()) .setChunkData(chunk) .setData(data); + if (blockData != null) { + PutBlockRequestProto.Builder createBlockRequest = + PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + writeChunkRequest.setBlock(createBlockRequest); + } String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder() @@ -537,6 +589,11 @@ public static void createContainer(XceiverClientSpi client, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } + request.setCmdType(ContainerProtos.Type.CreateContainer); request.setContainerID(containerID); request.setCreateContainer(createRequest.build()); @@ -566,6 +623,10 @@ public static void deleteContainer(XceiverClientSpi client, long containerID, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } client.sendCommand(request.build(), getValidatorList()); } @@ -588,6 +649,10 @@ public static void closeContainer(XceiverClientSpi client, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } client.sendCommand(request.build(), getValidatorList()); } @@ -611,6 +676,10 @@ public static ReadContainerResponseProto readContainer( if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } ContainerCommandResponseProto response = client.sendCommand(request.build(), getValidatorList()); @@ -646,12 +715,53 @@ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = client.sendCommand(request, getValidatorList()); return response.getGetSmallFile(); } + /** + * Send an echo to DataNode. 
+ * + * @return EchoResponseProto + */ + public static EchoResponseProto echo(XceiverClientSpi client, String encodedContainerID, + long containerID, ByteString payloadReqBytes, int payloadRespSizeKB, int sleepTimeMs, boolean readOnly) + throws IOException { + ContainerProtos.EchoRequestProto getEcho = + EchoRequestProto + .newBuilder() + .setPayload(payloadReqBytes) + .setPayloadSizeResp(payloadRespSizeKB) + .setSleepTimeMs(sleepTimeMs) + .setReadOnly(readOnly) + .build(); + String id = client.getPipeline().getClosestNode().getUuidString(); + + ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto + .newBuilder() + .setCmdType(Type.Echo) + .setContainerID(containerID) + .setDatanodeUuid(id) + .setEcho(getEcho); + if (!encodedContainerID.isEmpty()) { + builder.setEncodedToken(encodedContainerID); + } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } + ContainerCommandRequestProto request = builder.build(); + ContainerCommandResponseProto response = + client.sendCommand(request, getValidatorList()); + return response.getEcho(); + } + /** * Validates a response from a container protocol call. Any non-successful * return code is mapped to a corresponding exception and thrown. @@ -675,7 +785,7 @@ public static void validateContainerResponse( response.getMessage(), response.getResult()); } - public static List getValidatorList() { + private static List getValidatorList() { return VALIDATORS; } @@ -716,6 +826,10 @@ public static List toValidatorList(Validator validator) { if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); Map responses = xceiverClient.sendCommandOnAllNodes(request); @@ -741,6 +855,10 @@ public static List toValidatorList(Validator validator) { if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } Map responses = client.sendCommandOnAllNodes(request.build()); for (Map.Entry entry : diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java index 9a9002195c82..028d6c8e0329 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java @@ -48,11 +48,6 @@ public class PemFileBasedKeyStoresFactory implements KeyStoresFactory, private static final Logger LOG = LoggerFactory.getLogger(PemFileBasedKeyStoresFactory.class); - /** - * Default format of the keystore files. 
- */ - public static final String DEFAULT_KEYSTORE_TYPE = "jks"; - private KeyManager[] keyManagers; private TrustManager[] trustManagers; private final CertificateClient caClient; @@ -67,8 +62,7 @@ public PemFileBasedKeyStoresFactory(CertificateClient client) { */ private void createTrustManagers() throws GeneralSecurityException, IOException { - ReloadingX509TrustManager trustManager = new ReloadingX509TrustManager( - DEFAULT_KEYSTORE_TYPE, caClient); + ReloadingX509TrustManager trustManager = new ReloadingX509TrustManager(KeyStore.getDefaultType(), caClient); trustManagers = new TrustManager[] {trustManager}; } @@ -78,8 +72,7 @@ private void createTrustManagers() throws */ private void createKeyManagers() throws GeneralSecurityException, IOException { - ReloadingX509KeyManager keystoreManager = - new ReloadingX509KeyManager(DEFAULT_KEYSTORE_TYPE, caClient); + ReloadingX509KeyManager keystoreManager = new ReloadingX509KeyManager(KeyStore.getDefaultType(), caClient); keyManagers = new KeyManager[] {keystoreManager}; } @@ -101,7 +94,7 @@ public synchronized void init(Mode mode, boolean requireClientAuth) if (requireClientAuth || mode == Mode.SERVER) { createKeyManagers(); } else { - KeyStore keystore = KeyStore.getInstance(DEFAULT_KEYSTORE_TYPE); + KeyStore keystore = KeyStore.getInstance(KeyStore.getDefaultType()); keystore.load(null, null); KeyManagerFactory keyMgrFactory = KeyManagerFactory .getInstance(KeyManagerFactory.getDefaultAlgorithm()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java index d14129972c61..f6894b17e373 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java @@ -20,18 +20,23 @@ import java.io.File; import java.io.IOException; +import java.io.Reader; import java.util.List; import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.MappingIterator; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.SequenceWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * JSON Utility functions used in ozone. @@ -43,6 +48,8 @@ public final class JsonUtils { // before use. 
private static final ObjectMapper MAPPER; private static final ObjectWriter WRITER; + private static final ObjectMapper INDENT_OUTPUT_MAPPER; // New mapper instance + public static final Logger LOG = LoggerFactory.getLogger(JsonUtils.class); static { MAPPER = new ObjectMapper() @@ -50,6 +57,12 @@ public final class JsonUtils { .registerModule(new JavaTimeModule()) .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false); WRITER = MAPPER.writerWithDefaultPrettyPrinter(); + + INDENT_OUTPUT_MAPPER = new ObjectMapper() + .setSerializationInclusion(JsonInclude.Include.NON_NULL) + .registerModule(new JavaTimeModule()) + .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false) + .enable(SerializationFeature.INDENT_OUTPUT); } private JsonUtils() { @@ -65,14 +78,37 @@ public static String toJsonString(Object obj) throws IOException { return MAPPER.writeValueAsString(obj); } + public static String toJsonStringWIthIndent(Object obj) { + try { + return INDENT_OUTPUT_MAPPER.writeValueAsString(obj); + } catch (JsonProcessingException e) { + LOG.error("Error in JSON serialization", e); + return "{}"; + } + } + public static ArrayNode createArrayNode() { return MAPPER.createArrayNode(); } public static ObjectNode createObjectNode(Object next) { + if (next == null) { + return MAPPER.createObjectNode(); + } return MAPPER.valueToTree(next); } + public static JsonNode readTree(String content) throws IOException { + return MAPPER.readTree(content); + } + + /** + * Reads JSON content from a Reader and deserializes it into a Java object. + */ + public static T readFromReader(Reader reader, Class valueType) throws IOException { + return MAPPER.readValue(reader, valueType); + } + /** * Utility to sequentially write a large collection of items to a file. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java index b968d407232c..29bd847319ea 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java @@ -139,6 +139,16 @@ public static boolean isTracingEnabled( ScmConfigKeys.HDDS_TRACING_ENABLED_DEFAULT); } + /** + * Execute {@code runnable} inside an activated new span. + */ + public static void executeInNewSpan(String spanName, + CheckedRunnable runnable) throws E { + Span span = GlobalTracer.get() + .buildSpan(spanName).start(); + executeInSpan(span, runnable); + } + /** * Execute {@code supplier} inside an activated new span. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java deleted file mode 100644 index c5640cb15429..000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import com.google.common.collect.Lists; - -import java.util.List; -import java.util.Objects; - -/** - * An utility class to store a batch of DB write operations. - */ -public class BatchOperation { - - /** - * Enum for write operations. - */ - public enum Operation { - DELETE, PUT - } - - private List operations = - Lists.newArrayList(); - - /** - * Add a PUT operation into the batch. - */ - public void put(byte[] key, byte[] value) { - operations.add(new SingleOperation(Operation.PUT, key, value)); - } - - /** - * Add a DELETE operation into the batch. - */ - public void delete(byte[] key) { - operations.add(new SingleOperation(Operation.DELETE, key, null)); - - } - - public List getOperations() { - return operations; - } - - /** - * A SingleOperation represents a PUT or DELETE operation - * and the data the operation needs to manipulates. - */ - static class SingleOperation { - - private final Operation opt; - private final byte[] key; - private final byte[] value; - - SingleOperation(Operation opt, byte[] key, byte[] value) { - this.opt = opt; - this.key = Objects.requireNonNull(key, "key cannot be null"); - this.value = value; - } - - public Operation getOpt() { - return opt; - } - - public byte[] getKey() { - return key; - } - - public byte[] getValue() { - return value; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java index 4620a483385e..6a234ab5064a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java @@ -67,7 +67,7 @@ public static void close(Logger logger, AutoCloseable... closeables) { * Close each argument, catching exceptions and logging them as error. */ public static void close(Logger logger, - Collection closeables) { + Collection closeables) { if (closeables == null) { return; } @@ -94,7 +94,7 @@ public static void closeQuietly(AutoCloseable... closeables) { /** * Close each argument, swallowing exceptions. 
*/ - public static void closeQuietly(Collection closeables) { + public static void closeQuietly(Collection closeables) { close(null, closeables); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java index 64e494a5af10..1ac293b301bb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java @@ -28,6 +28,7 @@ import org.apache.ratis.thirdparty.io.netty.buffer.Unpooled; import org.apache.ratis.util.MemoizedSupplier; import org.apache.ratis.util.Preconditions; +import org.apache.ratis.util.UncheckedAutoCloseable; import org.apache.ratis.util.function.CheckedFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,7 +51,7 @@ * A buffer used by {@link Codec} * for supporting RocksDB direct {@link ByteBuffer} APIs. */ -public class CodecBuffer implements AutoCloseable { +public class CodecBuffer implements UncheckedAutoCloseable { public static final Logger LOG = LoggerFactory.getLogger(CodecBuffer.class); /** To create {@link CodecBuffer} instances. */ @@ -340,6 +341,12 @@ public int readableBytes() { return buf.readableBytes(); } + /** @return a writable {@link ByteBuffer}. */ + public ByteBuffer asWritableByteBuffer() { + assertRefCnt(1); + return buf.nioBuffer(0, buf.maxCapacity()); + } + /** @return a readonly {@link ByteBuffer} view of this buffer. */ public ByteBuffer asReadOnlyByteBuffer() { assertRefCnt(1); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index a0d4b59db168..38ebc80b27e6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -36,9 +36,9 @@ @InterfaceAudience.Public @InterfaceStability.Unstable public final class OzoneConfigKeys { - public static final String DFS_CONTAINER_IPC_PORT = - "dfs.container.ipc"; - public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; + public static final String HDDS_CONTAINER_IPC_PORT = + "hdds.container.ipc.port"; + public static final int HDDS_CONTAINER_IPC_PORT_DEFAULT = 9859; public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; @@ -56,52 +56,52 @@ public final class OzoneConfigKeys { * so that a mini cluster is able to launch multiple containers on a node. * * When set to false (default), the container port will be specified as - * {@link #DFS_CONTAINER_IPC_PORT} and the default value will be specified - * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}. + * {@link #HDDS_CONTAINER_IPC_PORT} and the default value will be specified + * as {@link #HDDS_CONTAINER_IPC_PORT_DEFAULT}. 
*/ - public static final String DFS_CONTAINER_IPC_RANDOM_PORT = - "dfs.container.ipc.random.port"; - public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = + public static final String HDDS_CONTAINER_IPC_RANDOM_PORT = + "hdds.container.ipc.random.port"; + public static final boolean HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = - "dfs.container.ratis.datastream.random.port"; + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = + "hdds.container.ratis.datastream.random.port"; public static final boolean - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY = - "dfs.container.chunk.write.sync"; - public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; + public static final String HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY = + "hdds.container.chunk.write.sync"; + public static final boolean HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; /** * Ratis Port where containers listen to. */ - public static final String DFS_CONTAINER_RATIS_IPC_PORT = - "dfs.container.ratis.ipc"; - public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; + public static final String HDDS_CONTAINER_RATIS_IPC_PORT = + "hdds.container.ratis.ipc.port"; + public static final int HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; /** * Ratis Port where containers listen to admin requests. */ - public static final String DFS_CONTAINER_RATIS_ADMIN_PORT = - "dfs.container.ratis.admin.port"; - public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; + public static final String HDDS_CONTAINER_RATIS_ADMIN_PORT = + "hdds.container.ratis.admin.port"; + public static final int HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; /** * Ratis Port where containers listen to server-to-server requests. */ - public static final String DFS_CONTAINER_RATIS_SERVER_PORT = - "dfs.container.ratis.server.port"; - public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; + public static final String HDDS_CONTAINER_RATIS_SERVER_PORT = + "hdds.container.ratis.server.port"; + public static final int HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; /** * Ratis Port where containers listen to datastream requests. */ - public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED - = "dfs.container.ratis.datastream.enabled"; - public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED + = "hdds.container.ratis.datastream.enabled"; + public static final boolean HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT - = "dfs.container.ratis.datastream.port"; - public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_PORT + = "hdds.container.ratis.datastream.port"; + public static final int HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT = 9855; /** @@ -126,7 +126,7 @@ public final class OzoneConfigKeys { public static final String OZONE_FS_HSYNC_ENABLED = "ozone.fs.hsync.enabled"; public static final boolean OZONE_FS_HSYNC_ENABLED_DEFAULT - = false; + = true; /** * hsync lease soft limit. 
@@ -141,9 +141,9 @@ public final class OzoneConfigKeys { * When set to true, allocate a random free port for ozone container, so that * a mini cluster is able to launch multiple containers on a node. */ - public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = - "dfs.container.ratis.ipc.random.port"; - public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT = + "hdds.container.ratis.ipc.random.port"; + public static final boolean HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = false; public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = @@ -333,97 +333,97 @@ public final class OzoneConfigKeys { public static final int OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY; + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = ScmConfigKeys. 
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; // config settings to enable stateMachineData write timeout public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = - "dfs.container.ratis.datanode.storage.dir"; + public static final String HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = + "hdds.container.ratis.datanode.storage.dir"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; public 
static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP; - public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP; + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; + public static final String 
HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; public static final String HDDS_DATANODE_PLUGINS_KEY = "hdds.datanode.plugins"; @@ -463,7 +463,7 @@ public final class OzoneConfigKeys { "hdds.datanode.replication.work.dir"; - public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 16 * 1024; + public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 8 * 1024; public static final String OZONE_CLIENT_READ_TIMEOUT = "ozone.client.read.timeout"; @@ -681,6 +681,20 @@ public final class OzoneConfigKeys { "hdds.scmclient.failover.max.retry"; + public static final String OZONE_XCEIVER_CLIENT_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY = + "ozone.xceiver.client.metrics.percentiles.intervals.seconds"; + + public static final String + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION = + "ozone.om.network.topology.refresh.duration"; + public static final String + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT = "1h"; + + public static final String OZONE_SECURITY_CRYPTO_COMPLIANCE_MODE = + "ozone.security.crypto.compliance.mode"; + public static final String OZONE_SECURITY_CRYPTO_COMPLIANCE_MODE_UNRESTRICTED = "unrestricted"; + + /** * There is no need to instantiate this class. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 9069c425e7d7..f2ad78eb842d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -119,6 +119,8 @@ public final class OzoneConsts { public static final long MB = KB * 1024L; public static final long GB = MB * 1024L; public static final long TB = GB * 1024L; + public static final long PB = TB * 1024L; + public static final long EB = PB * 1024L; /** * level DB names used by SCM and data nodes. @@ -387,6 +389,8 @@ private OzoneConsts() { /** Metadata stored in OmKeyInfo. 
*/ public static final String HSYNC_CLIENT_ID = "hsyncClientId"; public static final String LEASE_RECOVERY = "leaseRecovery"; + public static final String DELETED_HSYNC_KEY = "deletedHsyncKey"; + public static final String OVERWRITTEN_HSYNC_KEY = "overwrittenHsyncKey"; public static final String FORCE_LEASE_RECOVERY_ENV = "OZONE.CLIENT.RECOVER.LEASE.FORCE"; //GDPR @@ -589,4 +593,9 @@ private OzoneConsts() { */ public static final String COMPACTION_LOG_TABLE = "compactionLogTable"; + + /** + * S3G multipart upload request's ETag header key. + */ + public static final String ETAG = "ETag"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 985c238fd77b..c55945d53742 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -40,6 +40,8 @@ public enum OzoneManagerVersion implements ComponentVersion { LIGHTWEIGHT_LIST_KEYS(4, "OzoneManager version that supports lightweight" + " listKeys API."), + OBJECT_TAG(5, "OzoneManager version that supports object tags"), + FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java index 5fab7eacdf6d..1d596bf70077 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.ozone.common; +import org.apache.hadoop.hdds.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.lang.reflect.Field; import java.nio.ByteBuffer; import java.util.zip.Checksum; @@ -35,6 +39,8 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { private final Checksum checksum; private static final Field IS_READY_ONLY_FIELD; + // To access Checksum.update(ByteBuffer) API from Java 9+. + private static final MethodHandle BYTE_BUFFER_UPDATE; static { Field f = null; @@ -46,6 +52,18 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { LOG.error("No isReadOnly field in ByteBuffer", e); } IS_READY_ONLY_FIELD = f; + + MethodHandle byteBufferUpdate = null; + if (JavaUtils.isJavaVersionAtLeast(9)) { + try { + byteBufferUpdate = MethodHandles.publicLookup().findVirtual(Checksum.class, "update", + MethodType.methodType(void.class, ByteBuffer.class)); + } catch (Throwable t) { + throw new IllegalStateException("Failed to lookup Checksum.update(ByteBuffer)."); + } + } + BYTE_BUFFER_UPDATE = byteBufferUpdate; + } public ChecksumByteBufferImpl(Checksum impl) { @@ -57,6 +75,17 @@ public ChecksumByteBufferImpl(Checksum impl) { // should be refactored to simply call checksum.update(buffer), as the // Checksum interface has been enhanced to allow this since Java 9. public void update(ByteBuffer buffer) { + // Prefer JDK9+ implementation that allows ByteBuffer. This allows DirectByteBuffer to be checksum directly in + // native memory. 
+ if (BYTE_BUFFER_UPDATE != null) { + try { + BYTE_BUFFER_UPDATE.invokeExact(checksum, buffer); + return; + } catch (Throwable e) { + throw new IllegalStateException("Error invoking " + BYTE_BUFFER_UPDATE, e); + } + } + // this is a hack to not do memory copy. if (IS_READY_ONLY_FIELD != null) { try { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java index aea5c510661e..4f6bfa450b0f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java @@ -21,6 +21,7 @@ import java.util.Collections; import java.util.List; + import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos @@ -33,7 +34,7 @@ */ public class ChecksumData { - private ChecksumType type; + private final ChecksumType type; // Checksum will be computed for every bytesPerChecksum number of bytes and // stored sequentially in checksumList private final int bytesPerChecksum; @@ -47,7 +48,7 @@ public ChecksumData(ChecksumType checksumType, int bytesPerChecksum, List checksums) { this.type = checksumType; this.bytesPerChecksum = bytesPerChecksum; - this.checksums = checksums; + this.checksums = Collections.unmodifiableList(checksums); } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index 3948b5f04fc0..058934c2f27d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -27,10 +27,12 @@ import org.apache.hadoop.hdds.scm.ByteStringConversion; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.UncheckedAutoCloseable; /** Buffer for a block chunk. */ -public interface ChunkBuffer { +public interface ChunkBuffer extends UncheckedAutoCloseable { /** Similar to {@link ByteBuffer#allocate(int)}. */ static ChunkBuffer allocate(int capacity) { @@ -49,7 +51,8 @@ static ChunkBuffer allocate(int capacity, int increment) { if (increment > 0 && increment < capacity) { return new IncrementalChunkBuffer(capacity, increment, false); } - return new ChunkBufferImplWithByteBuffer(ByteBuffer.allocate(capacity)); + CodecBuffer codecBuffer = CodecBuffer.allocateDirect(capacity); + return new ChunkBufferImplWithByteBuffer(codecBuffer.asWritableByteBuffer(), codecBuffer); } /** Wrap the given {@link ByteBuffer} as a {@link ChunkBuffer}. */ @@ -86,6 +89,9 @@ default boolean hasRemaining() { /** Similar to {@link ByteBuffer#clear()}. */ ChunkBuffer clear(); + default void close() { + } + /** Similar to {@link ByteBuffer#put(ByteBuffer)}. 
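Since ChunkBuffer now extends UncheckedAutoCloseable and ChunkBuffer.allocate is backed by a direct CodecBuffer, callers should release buffers explicitly. A minimal usage sketch under that assumption; the payload bytes are made up.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.ozone.common.ChunkBuffer;

public final class ChunkBufferUsageSketch {
  public static void main(String[] args) {
    byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
    // try-with-resources releases the underlying direct CodecBuffer on close().
    try (ChunkBuffer chunk = ChunkBuffer.allocate(1024)) {
      chunk.put(ByteBuffer.wrap(data));
      System.out.println("has remaining: " + chunk.hasRemaining());
    }
  }
}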
*/ ChunkBuffer put(ByteBuffer b); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java index 0cf49681cb16..fe2ee5fa8acb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java @@ -28,13 +28,27 @@ import java.util.function.Function; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.UncheckedAutoCloseable; /** {@link ChunkBuffer} implementation using a single {@link ByteBuffer}. */ final class ChunkBufferImplWithByteBuffer implements ChunkBuffer { private final ByteBuffer buffer; + private final UncheckedAutoCloseable underlying; ChunkBufferImplWithByteBuffer(ByteBuffer buffer) { + this(buffer, null); + } + + ChunkBufferImplWithByteBuffer(ByteBuffer buffer, UncheckedAutoCloseable underlying) { this.buffer = Objects.requireNonNull(buffer, "buffer == null"); + this.underlying = underlying; + } + + @Override + public void close() { + if (underlying != null) { + underlying.close(); + } } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java index 5a63c09f1234..dda4fae0d2b5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.common; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import java.io.IOException; @@ -47,6 +48,8 @@ final class IncrementalChunkBuffer implements ChunkBuffer { private final int limitIndex; /** Buffer list to be allocated incrementally. */ private final List buffers; + /** The underlying buffers. */ + private final List underlying; /** Is this a duplicated buffer? (for debug only) */ private final boolean isDuplicated; /** The index of the first non-full buffer. */ @@ -58,11 +61,18 @@ final class IncrementalChunkBuffer implements ChunkBuffer { this.limit = limit; this.increment = increment; this.limitIndex = limit / increment; - this.buffers = new ArrayList<>( - limitIndex + (limit % increment == 0 ? 0 : 1)); + int size = limitIndex + (limit % increment == 0 ? 0 : 1); + this.buffers = new ArrayList<>(size); + this.underlying = isDuplicated ? Collections.emptyList() : new ArrayList<>(size); this.isDuplicated = isDuplicated; } + @Override + public void close() { + underlying.forEach(CodecBuffer::release); + underlying.clear(); + } + /** @return the capacity for the buffer at the given index. */ private int getBufferCapacityAtIndex(int i) { Preconditions.checkArgument(i >= 0); @@ -99,6 +109,7 @@ private ByteBuffer getAtIndex(int i) { /** @return the i-th buffer. It may allocate buffers. 
*/ private ByteBuffer getAndAllocateAtIndex(int index) { + Preconditions.checkState(!isDuplicated, "Duplicated buffer is readonly."); Preconditions.checkArgument(index >= 0); // never allocate over limit if (limit % increment == 0) { @@ -115,7 +126,9 @@ private ByteBuffer getAndAllocateAtIndex(int index) { // allocate upto the given index ByteBuffer b = null; for (; i <= index; i++) { - b = ByteBuffer.allocate(getBufferCapacityAtIndex(i)); + final CodecBuffer c = CodecBuffer.allocateDirect(getBufferCapacityAtIndex(i)); + underlying.add(c); + b = c.asWritableByteBuffer(); buffers.add(b); } return b; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java index 97122e689871..4c78ca777f21 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java @@ -18,10 +18,10 @@ package org.apache.hadoop.ozone.common.statemachine; -import com.google.common.base.Supplier; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; +import com.google.common.collect.ImmutableSet; import java.util.HashMap; import java.util.Map; @@ -33,16 +33,16 @@ * @param events allowed */ public class StateMachine, EVENT extends Enum> { - private STATE initialState; - private Set finalStates; + private final STATE initialState; + private final ImmutableSet finalStates; private final LoadingCache> transitions = CacheBuilder.newBuilder().build( - CacheLoader.from((Supplier>) () -> new HashMap())); + CacheLoader.from(() -> new HashMap<>())); public StateMachine(STATE initState, Set finalStates) { this.initialState = initState; - this.finalStates = finalStates; + this.finalStates = finalStates == null ? ImmutableSet.of() : ImmutableSet.copyOf(finalStates); } public STATE getInitialState() { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java index cbbbb70278a7..75408d65a661 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java @@ -64,7 +64,6 @@ public ChunkInfo(String chunkName, long offset, long len) { * * @param key - Key Name. * @param value - Value. - * @throws IOException */ public void addMetadata(String key, String value) throws IOException { synchronized (this.metadata) { @@ -80,7 +79,6 @@ public void addMetadata(String key, String value) throws IOException { * * @param info - Protobuf class * @return ChunkInfo - * @throws IOException */ public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info) throws IOException { @@ -182,14 +180,9 @@ public ByteString getStripeChecksum() { public void setStripeChecksum(ByteString stripeChecksum) { this.stripeChecksum = stripeChecksum; } - - /** - * Returns Metadata associated with this Chunk. - * - * @return - Map of Key,values. 
- */ - public Map getMetadata() { - return metadata; + + public String getMetadata(String key) { + return metadata.get(key); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java index 23c3dbaf1520..6bd83b44a93f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java @@ -200,11 +200,11 @@ public long getUnknownMessagesReceived() { return unknownMessagesReceived.value(); } - public MutableRate getGrpcQueueTime() { + MutableRate getGrpcQueueTime() { return grpcQueueTime; } - public MutableRate getGrpcProcessingTime() { + MutableRate getGrpcProcessingTime() { return grpcProcessingTime; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java index ccf33019aebc..727d7922539f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java @@ -19,9 +19,6 @@ import org.apache.hadoop.util.Time; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; import java.util.Objects; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicLong; @@ -49,9 +46,9 @@ public class Lease { private boolean expired; /** - * Functions to be called in case of timeout. + * Function to be called in case of timeout. */ - private List> callbacks; + private Callable callback; /** @@ -63,11 +60,7 @@ public class Lease { * Lease lifetime in milliseconds */ public Lease(T resource, long timeout) { - this.resource = resource; - this.leaseTimeout = new AtomicLong(timeout); - this.callbacks = Collections.synchronizedList(new ArrayList<>()); - this.creationTime = Time.monotonicNow(); - this.expired = false; + this(resource, timeout, null); } /** @@ -81,8 +74,11 @@ public Lease(T resource, long timeout) { * Callback registered to be triggered when lease expire */ public Lease(T resource, long timeout, Callable callback) { - this(resource, timeout); - callbacks.add(callback); + this.resource = resource; + this.leaseTimeout = new AtomicLong(timeout); + this.callback = callback; + this.creationTime = Time.monotonicNow(); + this.expired = false; } /** @@ -176,15 +172,15 @@ public String toString() { * * @return callbacks to be executed */ - List> getCallbacks() { - return callbacks; + Callable getCallback() { + return callback; } /** * Expires/Invalidates the lease. 
*/ void invalidate() { - callbacks = null; + callback = null; expired = true; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java index 3f2d5fbe9740..80ca937c0063 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java @@ -20,7 +20,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; import java.util.concurrent.Callable; /** @@ -33,19 +32,19 @@ public class LeaseCallbackExecutor implements Runnable { LoggerFactory.getLogger(LeaseCallbackExecutor.class); private final T resource; - private final List> callbacks; + private final Callable callback; /** * Constructs LeaseCallbackExecutor instance with list of callbacks. * * @param resource * The resource for which the callbacks are executed - * @param callbacks - * Callbacks to be executed by this executor + * @param callback + * Callback to be executed by this executor */ - public LeaseCallbackExecutor(T resource, List> callbacks) { + public LeaseCallbackExecutor(T resource, Callable callback) { this.resource = resource; - this.callbacks = callbacks; + this.callback = callback; } @Override @@ -53,7 +52,7 @@ public void run() { if (LOG.isDebugEnabled()) { LOG.debug("Executing callbacks for lease on {}", resource); } - for (Callable callback : callbacks) { + if (callback != null) { try { callback.call(); } catch (Exception e) { @@ -62,5 +61,4 @@ public void run() { } } } - } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java index 7dfcf3eb8c8b..bb4ccc1ac1e2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.lease; -import java.util.List; import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; @@ -268,10 +267,10 @@ public void run() { long remainingTime = lease.getRemainingTime(); if (remainingTime <= 0) { //Lease has timed out - List> leaseCallbacks = lease.getCallbacks(); + Callable leaseCallback = lease.getCallback(); release(resource); executorService.execute( - new LeaseCallbackExecutor<>(resource, leaseCallbacks)); + new LeaseCallbackExecutor<>(resource, leaseCallback)); } else { sleepTime = Math.min(remainingTime, sleepTime); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java index b3ffe59f1d7d..b3142c579f6c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.util; -import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -122,9 +121,7 @@ public void run() { * This is exposed purely for testing: do not invoke it. * @return the number of shutdown hooks which timed out. 
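The lease classes above now hold a single expiry callback instead of a list. A small sketch of the Callable contract as seen by LeaseCallbackExecutor; the resource name and printed action are hypothetical, and LeaseManager normally schedules the executor rather than calling run() directly.

import java.util.concurrent.Callable;
import org.apache.hadoop.ozone.lease.LeaseCallbackExecutor;

public final class LeaseCallbackSketch {
  public static void main(String[] args) {
    // A Callable<Void> must return null; it is invoked once when the lease expires.
    Callable<Void> onExpiry = () -> {
      System.out.println("lease on container-42 expired, releasing resources");
      return null;
    };
    // Running the executor directly just demonstrates the callback contract.
    new LeaseCallbackExecutor<>("container-42", onExpiry).run();
  }
}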
*/ - @InterfaceAudience.Private - @VisibleForTesting - int executeShutdown() { + private int executeShutdown() { int timeouts = 0; for (HookEntry entry: getShutdownHooksInOrder()) { Future future = EXECUTOR.submit(entry.getHook()); @@ -190,9 +187,7 @@ public static ShutdownHookManager get() { * {@link org.apache.hadoop.ozone.conf.OzoneServiceConfig * #OZONE_SHUTDOWN_TIMEOUT_MINIMUM} */ - @InterfaceAudience.Private - @VisibleForTesting - static long getShutdownTimeout(ConfigurationSource conf) { + private static long getShutdownTimeout(ConfigurationSource conf) { long duration = HddsUtils.getShutDownTimeOut(conf); if (duration < OZONE_SHUTDOWN_TIMEOUT_MINIMUM) { duration = OZONE_SHUTDOWN_TIMEOUT_MINIMUM; @@ -204,9 +199,7 @@ static long getShutdownTimeout(ConfigurationSource conf) { * Private structure to store ShutdownHook, its priority and timeout * settings. */ - @InterfaceAudience.Private - @VisibleForTesting - static class HookEntry { + private static class HookEntry { private final Runnable hook; private final int priority; private final long timeout; @@ -260,12 +253,9 @@ TimeUnit getTimeUnit() { private final Set hooks = Collections.synchronizedSet(new HashSet<>()); - private AtomicBoolean shutdownInProgress = new AtomicBoolean(false); + private final AtomicBoolean shutdownInProgress = new AtomicBoolean(false); - //private to constructor to ensure singularity - @VisibleForTesting - @InterfaceAudience.Private - ShutdownHookManager() { + private ShutdownHookManager() { } /** @@ -274,21 +264,13 @@ TimeUnit getTimeUnit() { * * @return the list of shutdownHooks in order of execution. */ - @InterfaceAudience.Private - @VisibleForTesting - List getShutdownHooksInOrder() { - List list; + private List getShutdownHooksInOrder() { + List list; synchronized (hooks) { - list = new ArrayList(hooks); + list = new ArrayList<>(hooks); } - Collections.sort(list, new Comparator< HookEntry >() { - - //reversing comparison so highest priority hooks are first - @Override - public int compare(HookEntry o1, HookEntry o2) { - return o2.priority - o1.priority; - } - }); + //reversing comparison so highest priority hooks are first + list.sort(Comparator.comparing(HookEntry::getPriority).reversed()); return list; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/PerformanceMetricsInitializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/PerformanceMetricsInitializer.java index 83d61cab6857..0a50eab19509 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/PerformanceMetricsInitializer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/PerformanceMetricsInitializer.java @@ -70,7 +70,7 @@ public static void initialize(T source, MetricsRegistry registry, * @param intervals intervals for quantiles * @return an instance of PerformanceMetrics */ - private static PerformanceMetrics getMetrics( + public static PerformanceMetrics getMetrics( MetricsRegistry registry, String name, String description, String sampleName, String valueName, int[] intervals) { return new PerformanceMetrics( diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/StringWithByteString.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/StringWithByteString.java new file mode 100644 index 000000000000..0e99acba3f49 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/StringWithByteString.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
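getShutdownHooksInOrder above swaps a hand-written Comparator for Comparator.comparing(...).reversed(). The same descending-priority sort in isolation, using a throwaway Hook class rather than the real HookEntry.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public final class PrioritySortSketch {
  static final class Hook {
    final String name;
    final int priority;
    Hook(String name, int priority) { this.name = name; this.priority = priority; }
    int getPriority() { return priority; }
  }

  public static void main(String[] args) {
    List<Hook> hooks = new ArrayList<>();
    hooks.add(new Hook("flush-metrics", 1));
    hooks.add(new Hook("stop-rpc", 10));
    // Highest priority first, mirroring getShutdownHooksInOrder().
    hooks.sort(Comparator.comparing(Hook::getPriority).reversed());
    hooks.forEach(h -> System.out.println(h.name));
  }
}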
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import com.google.protobuf.ByteString; +import net.jcip.annotations.Immutable; + +import java.util.Objects; + +/** + * Class to encapsulate and cache the conversion of a Java String to a ByteString. + */ +@Immutable +public final class StringWithByteString { + public static StringWithByteString valueOf(String string) { + return string != null ? new StringWithByteString(string, ByteString.copyFromUtf8(string)) : null; + } + + private final String string; + private final ByteString bytes; + + public StringWithByteString(String string, ByteString bytes) { + this.string = Objects.requireNonNull(string, "string == null"); + this.bytes = Objects.requireNonNull(bytes, "bytes == null"); + } + + public String getString() { + return string; + } + + public ByteString getBytes() { + return bytes; + } + + @Override + public String toString() { + return getString(); + } +} diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 47067de5fede..325c8a2c0a5d 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -46,26 +46,26 @@ - dfs.container.ipc + hdds.container.ipc.port 9859 OZONE, CONTAINER, MANAGEMENT The ipc port number of container. - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled false OZONE, CONTAINER, RATIS, DATASTREAM It specifies whether to enable data stream of container. - dfs.container.ratis.datastream.port + hdds.container.ratis.datastream.port 9855 OZONE, CONTAINER, RATIS, DATASTREAM The datastream port number of container. - dfs.container.ratis.datastream.random.port + hdds.container.ratis.datastream.random.port false OZONE, CONTAINER, RATIS, DATASTREAM Allocates a random free port for ozone container datastream. @@ -73,7 +73,7 @@ - dfs.container.ipc.random.port + hdds.container.ipc.random.port false OZONE, DEBUG, CONTAINER Allocates a random free port for ozone container. This is used @@ -82,7 +82,7 @@ - dfs.container.chunk.write.sync + hdds.container.chunk.write.sync false OZONE, CONTAINER, MANAGEMENT Determines whether the chunk writes in the container happen as @@ -90,19 +90,19 @@ - dfs.container.ratis.statemachinedata.sync.timeout + hdds.container.ratis.statemachinedata.sync.timeout 10s OZONE, DEBUG, CONTAINER, RATIS Timeout for StateMachine data writes by Ratis. - dfs.container.ratis.statemachinedata.sync.retries + hdds.container.ratis.statemachinedata.sync.retries OZONE, DEBUG, CONTAINER, RATIS Number of times the WriteStateMachineData op will be tried before failing. 
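StringWithByteString above caches the UTF-8 ByteString for a String so repeated protobuf serialization does not re-encode it. A usage sketch; the volume name is an arbitrary example value.

import com.google.protobuf.ByteString;
import org.apache.hadoop.util.StringWithByteString;

public final class StringWithByteStringSketch {
  public static void main(String[] args) {
    StringWithByteString volume = StringWithByteString.valueOf("vol1");
    ByteString first = volume.getBytes();
    ByteString second = volume.getBytes();
    // Same cached instance, so the UTF-8 encoding happened exactly once.
    System.out.println(first == second);    // true
    System.out.println(volume.getString()); // vol1
  }
}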
If the value is not configured, it will default - to (hdds.ratis.rpc.slowness.timeout / dfs.container.ratis.statemachinedata.sync.timeout), + to (hdds.ratis.rpc.slowness.timeout / hdds.container.ratis.statemachinedata.sync.timeout), which means that the WriteStatMachineData will be retried for every sync timeout until the configured slowness timeout is hit, after which the StateMachine will close down the pipeline. @@ -112,36 +112,38 @@ - dfs.container.ratis.log.queue.num-elements + hdds.container.ratis.log.queue.num-elements 1024 OZONE, DEBUG, CONTAINER, RATIS Limit for the number of operations in Ratis Log Worker. - dfs.container.ratis.log.queue.byte-limit + hdds.container.ratis.log.queue.byte-limit 4GB OZONE, DEBUG, CONTAINER, RATIS Byte limit for Ratis Log Worker queue. - dfs.container.ratis.log.appender.queue.num-elements - 1 + hdds.container.ratis.log.appender.queue.num-elements + 1024 OZONE, DEBUG, CONTAINER, RATIS Limit for number of append entries in ratis leader's log appender queue. - dfs.container.ratis.log.appender.queue.byte-limit + hdds.container.ratis.log.appender.queue.byte-limit + 32MB OZONE, DEBUG, CONTAINER, RATIS Byte limit for ratis leader's log appender queue. - dfs.container.ratis.log.purge.gap + hdds.container.ratis.log.purge.gap + 1000000 OZONE, DEBUG, CONTAINER, RATIS Purge gap between the last purged commit index @@ -149,7 +151,7 @@ - dfs.container.ratis.datanode.storage.dir + hdds.container.ratis.datanode.storage.dir OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS This directory is used for storing Ratis metadata like logs. If @@ -223,7 +225,7 @@ - dfs.container.ratis.enabled + hdds.container.ratis.enabled false OZONE, MANAGEMENT, PIPELINE, RATIS Ozone supports different kinds of replication pipelines. Ratis @@ -232,25 +234,26 @@ - dfs.container.ratis.ipc + hdds.container.ratis.ipc.port 9858 OZONE, CONTAINER, PIPELINE, RATIS The ipc port number of container for clients. - dfs.container.ratis.admin.port + hdds.container.ratis.admin.port 9857 OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT The ipc port number of container for admin requests. - dfs.container.ratis.server.port + hdds.container.ratis.server.port 9856 OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT The ipc port number of container for server-server communication. - dfs.container.ratis.ipc.random.port + hdds.container.ratis.ipc.random.port + false OZONE,DEBUG Allocates a random free port for ozone ratis port for the @@ -259,7 +262,7 @@ - dfs.container.ratis.rpc.type + hdds.container.ratis.rpc.type GRPC OZONE, RATIS, MANAGEMENT Ratis supports different kinds of transports like netty, GRPC, @@ -268,7 +271,7 @@ - dfs.ratis.snapshot.threshold + hdds.ratis.snapshot.threshold 10000 OZONE, RATIS Number of transactions after which a ratis snapshot should be @@ -276,16 +279,16 @@ - dfs.container.ratis.statemachine.max.pending.apply-transactions + hdds.container.ratis.statemachine.max.pending.apply-transactions 10000 OZONE, RATIS Maximum number of pending apply transactions in a data pipeline. The default value is kept same as default snapshot threshold - dfs.ratis.snapshot.threshold. + hdds.ratis.snapshot.threshold. 
- dfs.container.ratis.num.write.chunk.threads.per.volume + hdds.container.ratis.num.write.chunk.threads.per.volume 10 OZONE, RATIS, PERFORMANCE Maximum number of threads in the thread pool that Datanode @@ -295,7 +298,8 @@ - dfs.container.ratis.leader.pending.bytes.limit + hdds.container.ratis.leader.pending.bytes.limit + 1GB OZONE, RATIS, PERFORMANCE Limit on the total bytes of pending requests after which @@ -303,7 +307,7 @@ - dfs.container.ratis.replication.level + hdds.container.ratis.replication.level MAJORITY OZONE, RATIS Replication level to be used by datanode for submitting a @@ -312,7 +316,7 @@ - dfs.container.ratis.num.container.op.executors + hdds.container.ratis.num.container.op.executors 10 OZONE, RATIS, PERFORMANCE Number of executors that will be used by Ratis to execute @@ -320,7 +324,7 @@ - dfs.container.ratis.segment.size + hdds.container.ratis.segment.size 64MB OZONE, RATIS, PERFORMANCE The size of the raft segment file used @@ -328,7 +332,7 @@ - dfs.container.ratis.segment.preallocated.size + hdds.container.ratis.segment.preallocated.size 4MB OZONE, RATIS, PERFORMANCE The pre-allocated file size for raft segment used @@ -336,13 +340,13 @@ - dfs.ratis.server.retry-cache.timeout.duration + hdds.ratis.server.retry-cache.timeout.duration 600000ms OZONE, RATIS, MANAGEMENT Retry Cache entry timeout for ratis server. - dfs.ratis.leader.election.minimum.timeout.duration + hdds.ratis.leader.election.minimum.timeout.duration 5s OZONE, RATIS, MANAGEMENT The minimum timeout duration for ratis leader election. @@ -707,7 +711,7 @@ For production clusters or any time you care about performance, it is recommended that ozone.om.db.dirs, ozone.scm.db.dirs and - dfs.container.ratis.datanode.storage.dir be configured separately. + hdds.container.ratis.datanode.storage.dir be configured separately. @@ -856,8 +860,8 @@ - ozone.chunk.list.incremental - false + ozone.incremental.chunk.list + true OZONE, CLIENT, DATANODE, PERFORMANCE By default, a writer client sends full chunk list of a block when it @@ -1501,40 +1505,12 @@ to the OM. - - - hdds.rest.rest-csrf.enabled - false - - If true, then enables Object Store REST server protection against - cross-site request forgery (CSRF). - - - hdds.rest.http-address 0.0.0.0:9880 The http address of Object Store REST server inside the datanode. - - - - hdds.rest.netty.high.watermark - 65535 - - High watermark configuration to Netty for Object Store REST server. - - - - - hdds.rest.netty.low.watermark - 32768 - - Low watermark configuration to Netty for Object Store REST server. - - - hdds.datanode.plugins @@ -1931,7 +1907,14 @@ Setting this interval equal to the metrics sampling time ensures more detailed metrics. - + + ozone.xceiver.client.metrics.percentiles.intervals.seconds + 60 + XCEIVER, PERFORMANCE + Specifies the interval in seconds for the rollover of XceiverClient MutableQuantiles metrics. + Setting this interval equal to the metrics sampling time ensures more detailed metrics. + + ozone.om.save.metrics.interval 5m @@ -1969,7 +1952,15 @@ set, ozone.security.http.kerberos.enabled should be set to true. - + + ozone.security.crypto.compliance.mode + none + OZONE, SECURITY, HDDS, CRYPTO_COMPLIANCE + Based on this property the security compliance mode + is loaded and enables filtering cryptographic configuration options + according to the specified compliance mode. 
+ + ozone.client.read.timeout @@ -2147,7 +2138,7 @@ - ozone.om.ratis.server.leaderelection.pre-vote + ozone.om.ratis.server.leaderelection.pre-vote true OZONE, OM, RATIS, MANAGEMENT Enable/disable OM HA leader election pre-vote phase. @@ -2164,6 +2155,15 @@ + + ozone.om.ratis.server.close.threshold + 60s + OZONE, OM, RATIS + + Raft Server will close if JVM pause longer than the threshold. + + + ozone.om.ratis.snapshot.dir @@ -2248,6 +2248,14 @@ OZONE, SECURITY, KERBEROS The OzoneManager service principal. Ex om/_HOST@REALM.COM + + ozone.om.kerberos.principal.pattern + * + + A client-side RegEx that can be configured to control + allowed realms to authenticate with (useful in cross-realm env.) + + ozone.om.http.auth.kerberos.principal HTTP/_HOST@REALM @@ -2265,12 +2273,45 @@ principal if SPNEGO is enabled for om http server. + + ssl.server.keystore.type + jks + OZONE, SECURITY, CRYPTO_COMPLIANCE + + The keystore type for HTTP Servers used in ozone. + + + + ssl.server.truststore.type + jks + OZONE, SECURITY, CRYPTO_COMPLIANCE + + The truststore type for HTTP Servers used in ozone. + + hdds.key.len 2048 - SCM, HDDS, X509, SECURITY + SCM, HDDS, X509, SECURITY, CRYPTO_COMPLIANCE - SCM CA key length. This is an algorithm-specific metric, such as modulus length, specified in number of bits. + SCM CA key length. This is an algorithm-specific metric, such as modulus + length, specified in number of bits. + + + + hdds.key.algo + RSA + SCM, HDDS, X509, SECURITY, CRYPTO_COMPLIANCE + + SCM CA key algorithm. + + + + hdds.security.provider + BC + OZONE, HDDS, X509, SECURITY, CRYPTO_COMPLIANCE + + The main security provider used for various cryptographic algorithms. @@ -2314,7 +2355,7 @@ hdds.grpc.tls.provider OPENSSL - OZONE, HDDS, SECURITY, TLS + OZONE, HDDS, SECURITY, TLS, CRYPTO_COMPLIANCE HDDS GRPC server TLS provider. @@ -2358,7 +2399,7 @@ hdds.x509.signature.algorithm SHA256withRSA - OZONE, HDDS, SECURITY + OZONE, HDDS, SECURITY, CRYPTO_COMPLIANCE X509 signature certificate. @@ -2796,6 +2837,14 @@ manager admin protocol. + + ozone.security.reconfigure.protocol.acl + * + SECURITY + + Comma separated list of users and groups allowed to access reconfigure protocol. + + hdds.datanode.http.auth.kerberos.principal @@ -3792,6 +3841,14 @@ Wait duration before which close container is send to DN. + + ozone.om.network.topology.refresh.duration + 1h + SCM, OZONE, OM + The duration at which we periodically fetch the updated network + topology cluster tree from SCM. + + ozone.scm.ha.ratis.server.snapshot.creation.gap 1024 @@ -4116,7 +4173,7 @@ ozone.fs.hsync.enabled - false + true OZONE, CLIENT Enable hsync/hflush. By default they are disabled. @@ -4276,7 +4333,7 @@ - ozone.om.snapshot.diff.cleanup.service.run.internal + ozone.om.snapshot.diff.cleanup.service.run.interval 1m OZONE, OM @@ -4296,11 +4353,12 @@ - ozone.om.snapshot.sst_dumptool.pool.size - 1 + ozone.om.snapshot.cache.cleanup.service.run.interval + 1m OZONE, OM - Threadpool size for SST Dumptool which would be used for computing snapdiff when native library is enabled. + Interval at which snapshot cache clean up will run. + Uses millisecond by default when no time unit is specified. @@ -4313,15 +4371,6 @@ - - ozone.om.snapshot.sst_dumptool.buffer.size - 8KB - OZONE, OM - - Buffer size for SST Dumptool Pipe which would be used for computing snapdiff when native library is enabled. 
- - - ozone.om.snapshot.diff.max.allowed.keys.changed.per.job 10000000 @@ -4371,7 +4420,7 @@ hdds.secret.key.algorithm HmacSHA256 - SCM, SECURITY + SCM, SECURITY, CRYPTO_COMPLIANCE The algorithm that SCM uses to generate symmetric secret keys. A valid algorithm is the one supported by KeyGenerator, as described at diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/JsonTestUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/JsonTestUtils.java new file mode 100644 index 000000000000..ac19c30a4ad5 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/JsonTestUtils.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds; + +import com.fasterxml.jackson.annotation.JsonInclude; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * JSON Utility functions used in ozone for Test classes. + */ +public final class JsonTestUtils { + + // Reuse ObjectMapper instance for improving performance. + // ObjectMapper is thread safe as long as we always configure instance + // before use. + private static final ObjectMapper MAPPER; + private static final ObjectWriter WRITER; + + static { + MAPPER = new ObjectMapper() + .setSerializationInclusion(JsonInclude.Include.NON_NULL) + .registerModule(new JavaTimeModule()) + .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false); + WRITER = MAPPER.writerWithDefaultPrettyPrinter(); + } + + private JsonTestUtils() { + // Never constructed + } + + public static String toJsonStringWithDefaultPrettyPrinter(Object obj) + throws IOException { + return WRITER.writeValueAsString(obj); + } + + public static String toJsonString(Object obj) throws IOException { + return MAPPER.writeValueAsString(obj); + } + + public static JsonNode valueToJsonNode(Object value) { + return MAPPER.valueToTree(value); + } + + public static JsonNode readTree(String content) throws IOException { + return MAPPER.readTree(content); + } + + public static List> readTreeAsListOfMaps(String json) + throws IOException { + return MAPPER.readValue(json, + new TypeReference>>() { + }); + } + + /** + * Converts a JsonNode into a Java object of the specified type. + * @param node The JsonNode to convert. + * @param valueType The target class of the Java object. + * @param The type of the Java object. + * @return A Java object of type T, populated with data from the JsonNode. + * @throws IOException + */ + public static T treeToValue(JsonNode node, Class valueType) + throws IOException { + return MAPPER.treeToValue(node, valueType); + } + +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java index 76b6a0db89b3..b20ce53597eb 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdds.fs; +import java.util.concurrent.atomic.AtomicLong; + /** * {@link SpaceUsageSource} implementations for testing. 
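A short sketch of how the new JsonTestUtils helper above might be used from a test; the map contents are arbitrary.

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.hadoop.hdds.JsonTestUtils;

public final class JsonTestUtilsSketch {
  public static void main(String[] args) throws IOException {
    Map<String, Object> report = new LinkedHashMap<>();
    report.put("containerID", 42L);
    report.put("state", "CLOSED");

    String json = JsonTestUtils.toJsonStringWithDefaultPrettyPrinter(report);
    JsonNode node = JsonTestUtils.readTree(json);
    System.out.println(node.get("state").asText()); // CLOSED
  }
}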
*/ @@ -35,6 +37,26 @@ public static SpaceUsageSource fixed(long capacity, long available, return new SpaceUsageSource.Fixed(capacity, available, used); } + /** @return {@code SpaceUsageSource} with fixed capacity and dynamic usage */ + public static SpaceUsageSource of(long capacity, AtomicLong used) { + return new SpaceUsageSource() { + @Override + public long getUsedSpace() { + return used.get(); + } + + @Override + public long getCapacity() { + return capacity; + } + + @Override + public long getAvailable() { + return getCapacity() - getUsedSpace(); + } + }; + } + private MockSpaceUsageSource() { throw new UnsupportedOperationException("no instances"); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java index 674c1233dee6..8523861000e7 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java @@ -142,6 +142,20 @@ public void savesValueOnShutdown() { verify(executor).shutdown(); } + @Test + public void testDecrementDoesNotGoNegative() { + SpaceUsageCheckParams params = paramsBuilder(new AtomicLong(50)) + .withRefresh(Duration.ZERO) + .build(); + CachingSpaceUsageSource subject = new CachingSpaceUsageSource(params); + + // Try to decrement more than the current value + subject.decrementUsedSpace(100); + + // Check that the value has been set to 0 + assertEquals(0, subject.getUsedSpace()); + } + private static long missingInitialValue() { return 0L; } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java index 9567fa2c281e..0d30d43dc01f 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java @@ -19,8 +19,10 @@ import org.apache.hadoop.util.PureJavaCrc32; import org.apache.hadoop.util.PureJavaCrc32C; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import org.apache.commons.lang3.RandomUtils; import java.util.zip.Checksum; @@ -45,6 +47,23 @@ public void testPureJavaCrc32CByteBuffer() { new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); } + @Test + public void testWithDirectBuffer() { + final ChecksumByteBuffer checksum = ChecksumByteBufferFactory.crc32CImpl(); + byte[] value = "test".getBytes(StandardCharsets.UTF_8); + checksum.reset(); + checksum.update(value, 0, value.length); + long checksum1 = checksum.getValue(); + + ByteBuffer byteBuffer = ByteBuffer.allocateDirect(value.length); + byteBuffer.put(value).rewind(); + checksum.reset(); + checksum.update(byteBuffer); + long checksum2 = checksum.getValue(); + + Assertions.assertEquals(checksum1, checksum2); + } + static class VerifyChecksumByteBuffer { private final Checksum expected; private final ChecksumByteBuffer testee; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java index 3d6d38f3d3bd..b5212825e58b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java +++ 
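MockSpaceUsageSource.of above pairs a fixed capacity with a mutable AtomicLong, which is what TestCachingSpaceUsageSource relies on. A minimal sketch of that behaviour; the numbers are arbitrary.

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
import org.apache.hadoop.hdds.fs.SpaceUsageSource;

public final class MockSpaceUsageSketch {
  public static void main(String[] args) {
    AtomicLong used = new AtomicLong(0);
    SpaceUsageSource source = MockSpaceUsageSource.of(1000, used);

    used.addAndGet(250);
    // Available space tracks the mutable counter: 1000 - 250.
    System.out.println(source.getAvailable()); // 750
  }
}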
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java @@ -29,7 +29,11 @@ import org.apache.hadoop.hdds.utils.MockGatheringChannel; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.hdds.utils.db.CodecTestUtil; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -46,6 +50,16 @@ private static int nextInt(int n) { return ThreadLocalRandom.current().nextInt(n); } + @BeforeAll + public static void beforeAll() { + CodecBuffer.enableLeakDetection(); + } + + @AfterEach + public void after() throws Exception { + CodecTestUtil.gc(); + } + @Test @Timeout(1) void testImplWithByteBuffer() throws IOException { @@ -59,7 +73,9 @@ void testImplWithByteBuffer() throws IOException { private static void runTestImplWithByteBuffer(int n) throws IOException { final byte[] expected = new byte[n]; ThreadLocalRandom.current().nextBytes(expected); - runTestImpl(expected, 0, ChunkBuffer.allocate(n)); + try (ChunkBuffer c = ChunkBuffer.allocate(n)) { + runTestImpl(expected, 0, c); + } } @Test @@ -78,8 +94,9 @@ void testIncrementalChunkBuffer() throws IOException { private static void runTestIncrementalChunkBuffer(int increment, int n) throws IOException { final byte[] expected = new byte[n]; ThreadLocalRandom.current().nextBytes(expected); - runTestImpl(expected, increment, - new IncrementalChunkBuffer(n, increment, false)); + try (IncrementalChunkBuffer c = new IncrementalChunkBuffer(n, increment, false)) { + runTestImpl(expected, increment, c); + } } @Test diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java index 24feb69389b1..15a4dde0210d 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java @@ -52,5 +52,6 @@ public enum ConfigTag { TLS, TOKEN, UPGRADE, - X509 + X509, + CRYPTO_COMPLIANCE } diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java index 4ed59669a9df..e121e4333a0d 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java @@ -118,7 +118,7 @@ void set(ConfigurationTarget target, String key, Object value, SIZE { @Override Object parse(String value, Config config, Class type, String key) { - StorageSize measure = StorageSize.parse(value); + StorageSize measure = StorageSize.parse(value, config.sizeUnit()); long val = Math.round(measure.getUnit().toBytes(measure.getValue())); if (type == int.class) { return (int) val; @@ -130,9 +130,9 @@ Object parse(String value, Config config, Class type, String key) { void set(ConfigurationTarget target, String key, Object value, Config config) { if (value instanceof Long) { - target.setStorageSize(key, (long) value, StorageUnit.BYTES); + target.setStorageSize(key, (long) value, config.sizeUnit()); } else if (value instanceof Integer) { - target.setStorageSize(key, (int) value, StorageUnit.BYTES); + target.setStorageSize(key, (int) value, config.sizeUnit()); } else { throw new ConfigurationException("Unsupported type " + value.getClass() + " for " + key); diff --git 
a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml index 0791ffb9eab0..f68fa91db864 100644 --- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml @@ -15,18 +15,6 @@ limitations under the License. --> - - - - - - - - - - - - diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java index e26610b357e7..8b0b3a7ca239 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java @@ -20,12 +20,13 @@ import com.google.protobuf.BlockingService; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.ReconfigurationHandler; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos; -import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; +import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolDatanodePB; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; @@ -66,6 +67,10 @@ protected HddsDatanodeClientProtocolServer( HDDS_DATANODE_CLIENT_ADDRESS_KEY, HddsUtils.getDatanodeRpcAddress(conf), rpcServer); datanodeDetails.setPort(CLIENT_RPC, clientRpcAddress.getPort()); + if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, + false)) { + rpcServer.refreshServiceAcl(conf, HddsPolicyProvider.getInstance()); + } } public void start() { @@ -97,7 +102,7 @@ private RPC.Server getRpcServer(OzoneConfiguration configuration, InetSocketAddress rpcAddress = HddsUtils.getDatanodeRpcAddress(conf); // Add reconfigureProtocolService. 
RPC.setProtocolEngine( - configuration, ReconfigureProtocolPB.class, ProtobufRpcEngine.class); + configuration, ReconfigureProtocolDatanodePB.class, ProtobufRpcEngine.class); final int handlerCount = conf.getInt(HDDS_DATANODE_HANDLER_COUNT_KEY, HDDS_DATANODE_HANDLER_COUNT_DEFAULT); @@ -108,7 +113,7 @@ private RPC.Server getRpcServer(OzoneConfiguration configuration, reconfigureServerProtocol); return preserveThreadName(() -> startRpcServer(configuration, rpcAddress, - ReconfigureProtocolPB.class, reconfigureService, handlerCount)); + ReconfigureProtocolDatanodePB.class, reconfigureService, handlerCount)); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index f59622cb0faf..bbaf58d36b4f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -233,7 +233,6 @@ public void start() { datanodeDetails.setRevision( HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); - datanodeDetails.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); TracingUtil.initTracing( "HddsDatanodeService." + datanodeDetails.getUuidString() .substring(0, 8), conf); @@ -424,17 +423,19 @@ private DatanodeDetails initializeDatanodeDetails() String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf); Preconditions.checkNotNull(idFilePath); File idFile = new File(idFilePath); + DatanodeDetails details; if (idFile.exists()) { - return ContainerUtils.readDatanodeDetailsFrom(idFile); + details = ContainerUtils.readDatanodeDetailsFrom(idFile); + // Current version is always overridden to the latest + details.setCurrentVersion(getDefaultCurrentVersion()); } else { // There is no datanode.id file, this might be the first time datanode // is started. - DatanodeDetails details = DatanodeDetails.newBuilder() - .setUuid(UUID.randomUUID()).build(); - details.setInitialVersion(DatanodeVersion.CURRENT_VERSION); - details.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); - return details; + details = DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build(); + details.setInitialVersion(getDefaultInitialVersion()); + details.setCurrentVersion(getDefaultCurrentVersion()); } + return details; } /** @@ -678,4 +679,20 @@ private String reconfigReplicationStreamsLimit(String value) { .setPoolSize(Integer.parseInt(value)); return value; } + + /** + * Returns the initial version of the datanode. + */ + @VisibleForTesting + public static int getDefaultInitialVersion() { + return DatanodeVersion.CURRENT_VERSION; + } + + /** + * Returns the current version of the datanode. + */ + @VisibleForTesting + public static int getDefaultCurrentVersion() { + return DatanodeVersion.CURRENT_VERSION; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java new file mode 100644 index 000000000000..eeed4fab5f72 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone; + + +import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; +import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.Service; +import org.apache.ratis.util.MemoizedSupplier; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Supplier; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; + +/** + * {@link PolicyProvider} for Datanode protocols. + */ +@Private +@Unstable +public final class HddsPolicyProvider extends PolicyProvider { + + private static final Supplier SUPPLIER = + MemoizedSupplier.valueOf(HddsPolicyProvider::new); + + private HddsPolicyProvider() { + } + + @Private + @Unstable + public static HddsPolicyProvider getInstance() { + return SUPPLIER.get(); + } + + private static final List DN_SERVICES = + Arrays.asList( + new Service( + OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) + ); + + @Override + public Service[] getServices() { + return DN_SERVICES.toArray(new Service[0]); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java index d271e7d5d48f..f7a38e3dec8b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java @@ -40,7 +40,8 @@ public enum DNAction implements AuditAction { CLOSE_CONTAINER, GET_COMMITTED_BLOCK_LENGTH, STREAM_INIT, - FINALIZE_BLOCK; + FINALIZE_BLOCK, + ECHO; @Override public String getAction() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java index fc193751893f..337e4e3e29ce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java @@ -52,6 +52,9 @@ public class ContainerMetrics { @Metric private MutableCounterLong containerDeleteFailedNonEmpty; @Metric private MutableCounterLong containerDeleteFailedBlockCountNotZero; @Metric private MutableCounterLong containerForceDelete; + @Metric private MutableCounterLong numReadStateMachine; + @Metric private MutableCounterLong bytesReadStateMachine; + private MutableCounterLong[] numOpsArray; private MutableCounterLong[] opsBytesArray; @@ -152,4 +155,20 
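HddsPolicyProvider above uses Ratis' MemoizedSupplier for a lazily constructed, thread-safe singleton. The same idiom in isolation; ExpensiveThing is a made-up stand-in for the provider.

import java.util.function.Supplier;
import org.apache.ratis.util.MemoizedSupplier;

public final class MemoizedSupplierSketch {
  static final class ExpensiveThing {
    ExpensiveThing() { System.out.println("constructed once"); }
  }

  private static final Supplier<ExpensiveThing> INSTANCE =
      MemoizedSupplier.valueOf(ExpensiveThing::new);

  public static void main(String[] args) {
    // First get() constructs the instance; later calls return the cached one.
    ExpensiveThing a = INSTANCE.get();
    ExpensiveThing b = INSTANCE.get();
    System.out.println(a == b); // true
  }
}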
@@ public long getContainerDeleteFailedBlockCountNotZero() { public long getContainerForceDelete() { return containerForceDelete.value(); } + + public void incNumReadStateMachine() { + numReadStateMachine.incr(); + } + + public long getNumReadStateMachine() { + return numReadStateMachine.value(); + } + + public void incBytesReadStateMachine(long bytes) { + bytesReadStateMachine.incr(bytes); + } + + public long getBytesReadStateMachine() { + return bytesReadStateMachine.value(); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java index f8acbc7e2d69..e0a4cd4cfb3d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java @@ -26,6 +26,7 @@ import java.lang.reflect.Field; import java.nio.charset.StandardCharsets; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; import java.util.UUID; @@ -56,7 +57,7 @@ private DatanodeIdYaml() { } /** - * Creates a yaml file using DatnodeDetails. This method expects the path + * Creates a yaml file using DatanodeDetails. This method expects the path * validation to be performed by the caller. * * @param datanodeDetails {@link DatanodeDetails} @@ -238,8 +239,9 @@ private static DatanodeDetailsYaml getDatanodeDetailsYaml( = new DatanodeLayoutStorage(conf, datanodeDetails.getUuidString()); Map portDetails = new LinkedHashMap<>(); - if (!CollectionUtils.isEmpty(datanodeDetails.getPorts())) { - for (DatanodeDetails.Port port : datanodeDetails.getPorts()) { + final List ports = datanodeDetails.getPorts(); + if (!CollectionUtils.isEmpty(ports)) { + for (DatanodeDetails.Port port : ports) { Field f = null; try { f = DatanodeDetails.Port.Name.class diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java index 8444b3bda1e8..210c538f274a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java @@ -25,12 +25,9 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import com.google.common.collect.ImmutableList; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Defines layout versions for the Chunks. 
@@ -39,22 +36,17 @@ public enum ContainerLayoutVersion { FILE_PER_CHUNK(1, "One file per chunk") { @Override - public File getChunkFile(File chunkDir, BlockID blockID, - ChunkInfo info) { - return new File(chunkDir, info.getChunkName()); + public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { + return new File(chunkDir, chunkName); } }, FILE_PER_BLOCK(2, "One file per block") { @Override - public File getChunkFile(File chunkDir, BlockID blockID, - ChunkInfo info) { + public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { return new File(chunkDir, blockID.getLocalID() + ".block"); } }; - private static final Logger LOG = - LoggerFactory.getLogger(ContainerLayoutVersion.class); - private static final ContainerLayoutVersion DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK; @@ -118,12 +110,12 @@ public String getDescription() { } public abstract File getChunkFile(File chunkDir, - BlockID blockID, ChunkInfo info); + BlockID blockID, String chunkName); public File getChunkFile(ContainerData containerData, BlockID blockID, - ChunkInfo info) throws StorageContainerException { + String chunkName) throws StorageContainerException { File chunkDir = ContainerUtils.getChunkDir(containerData); - return getChunkFile(chunkDir, blockID, info); + return getChunkFile(chunkDir, blockID, chunkName); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index f20615d23f8c..8d7dc087c67b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -62,7 +62,6 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.OnDemandContainerDataScanner; import org.apache.hadoop.ozone.container.common.volume.VolumeUsage; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; import org.apache.ratis.statemachine.StateMachine; import org.apache.ratis.thirdparty.com.google.protobuf.ProtocolMessageEnum; @@ -91,6 +90,16 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor { static final Logger LOG = LoggerFactory.getLogger(HddsDispatcher.class); private static final AuditLogger AUDIT = new AuditLogger(AuditLoggerType.DNLOGGER); + private static final String AUDIT_PARAM_CONTAINER_ID = "containerID"; + private static final String AUDIT_PARAM_CONTAINER_TYPE = "containerType"; + private static final String AUDIT_PARAM_FORCE_UPDATE = "forceUpdate"; + private static final String AUDIT_PARAM_FORCE_DELETE = "forceDelete"; + private static final String AUDIT_PARAM_START_CONTAINER_ID = "startContainerID"; + private static final String AUDIT_PARAM_BLOCK_DATA = "blockData"; + private static final String AUDIT_PARAM_BLOCK_DATA_SIZE = "blockDataSize"; + private static final String AUDIT_PARAM_COUNT = "count"; + private static final String AUDIT_PARAM_START_LOCAL_ID = "startLocalID"; + private static final String AUDIT_PARAM_PREV_CHUNKNAME = "prevChunkName"; private final Map handlers; private final ConfigurationSource conf; private final ContainerSet containerSet; @@ -257,7 +266,7 @@ private ContainerCommandResponseProto dispatchRequest( if (getMissingContainerSet().contains(containerID)) { 
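getChunkFile above now takes the chunk name instead of a full ChunkInfo. A sketch contrasting the two layouts; the directory, BlockID values and chunk name are placeholders, and the BlockID import is assumed to be org.apache.hadoop.hdds.client.BlockID.

import java.io.File;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;

public final class ChunkFileLayoutSketch {
  public static void main(String[] args) {
    File chunkDir = new File("/tmp/container-1/chunks");
    BlockID blockID = new BlockID(1L, 100L);

    // One file per chunk keeps the chunk name as the file name.
    File perChunk = ContainerLayoutVersion.FILE_PER_CHUNK
        .getChunkFile(chunkDir, blockID, "100_chunk_1");
    // One file per block names the file after the local block ID.
    File perBlock = ContainerLayoutVersion.FILE_PER_BLOCK
        .getChunkFile(chunkDir, blockID, "100_chunk_1");

    System.out.println(perChunk); // .../chunks/100_chunk_1
    System.out.println(perBlock); // .../chunks/100.block
  }
}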
StorageContainerException sce = new StorageContainerException( "ContainerID " + containerID - + " has been lost and and cannot be recreated on this DataNode", + + " has been lost and cannot be recreated on this DataNode", ContainerProtos.Result.CONTAINER_MISSING); audit(action, eventType, params, AuditEventStatus.FAILURE, sce); return ContainerUtils.logAndReturnError(LOG, sce, msg); @@ -487,7 +496,7 @@ ContainerCommandResponseProto createContainer( private void validateToken( ContainerCommandRequestProto msg) throws IOException { tokenVerifier.verify( - msg, UserGroupInformation.getCurrentUser().getShortUserName(), + msg, msg.getEncodedToken() ); } @@ -526,13 +535,14 @@ public void validateContainerCommand( Handler handler = getHandler(containerType); if (handler == null) { StorageContainerException ex = new StorageContainerException( - "Invalid " + "ContainerType " + containerType, + "Invalid ContainerType " + containerType, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); audit(action, eventType, params, AuditEventStatus.FAILURE, ex); throw ex; } State containerState = container.getContainerState(); + String log = "Container " + containerID + " in " + containerState + " state"; if (!HddsUtils.isReadOnly(msg) && !HddsUtils.isOpenToWriteState(containerState)) { switch (cmdType) { @@ -546,14 +556,12 @@ public void validateContainerCommand( default: // if the container is not open/recovering, no updates can happen. Just // throw an exception - ContainerNotOpenException cex = new ContainerNotOpenException( - "Container " + containerID + " in " + containerState + " state"); + ContainerNotOpenException cex = new ContainerNotOpenException(log); audit(action, eventType, params, AuditEventStatus.FAILURE, cex); throw cex; } } else if (HddsUtils.isReadOnly(msg) && containerState == State.INVALID) { - InvalidContainerStateException iex = new InvalidContainerStateException( - "Container " + containerID + " in " + containerState + " state"); + InvalidContainerStateException iex = new InvalidContainerStateException(log); audit(action, eventType, params, AuditEventStatus.FAILURE, iex); throw iex; } @@ -605,7 +613,7 @@ private boolean isVolumeFull(Container container) { long volumeCapacity = precomputedVolumeSpace.getCapacity(); long volumeFreeSpaceToSpare = VolumeUsage.getMinVolumeFreeSpace(conf, volumeCapacity); - long volumeFree = volume.getAvailable(precomputedVolumeSpace); + long volumeFree = precomputedVolumeSpace.getAvailable(); long volumeCommitted = volume.getCommittedBytes(); long volumeAvailable = volumeFree - volumeCommitted; return (volumeAvailable <= volumeFreeSpaceToSpare); @@ -807,6 +815,7 @@ private static DNAction getAuditAction(Type cmdType) { case GetCommittedBlockLength : return DNAction.GET_COMMITTED_BLOCK_LENGTH; case StreamInit : return DNAction.STREAM_INIT; case FinalizeBlock : return DNAction.FINALIZE_BLOCK; + case Echo : return DNAction.ECHO; default : LOG.debug("Invalid command type - {}", cmdType); return null; @@ -820,36 +829,36 @@ private static Map getAuditParams( String containerID = String.valueOf(msg.getContainerID()); switch (cmdType) { case CreateContainer: - auditParams.put("containerID", containerID); - auditParams.put("containerType", + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); + auditParams.put(AUDIT_PARAM_CONTAINER_TYPE, msg.getCreateContainer().getContainerType().toString()); return auditParams; case ReadContainer: - auditParams.put("containerID", containerID); + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); return auditParams; case 
UpdateContainer: - auditParams.put("containerID", containerID); - auditParams.put("forceUpdate", + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); + auditParams.put(AUDIT_PARAM_FORCE_UPDATE, String.valueOf(msg.getUpdateContainer().getForceUpdate())); return auditParams; case DeleteContainer: - auditParams.put("containerID", containerID); - auditParams.put("forceDelete", + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); + auditParams.put(AUDIT_PARAM_FORCE_DELETE, String.valueOf(msg.getDeleteContainer().getForceDelete())); return auditParams; case ListContainer: - auditParams.put("startContainerID", containerID); - auditParams.put("count", + auditParams.put(AUDIT_PARAM_START_CONTAINER_ID, containerID); + auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListContainer().getCount())); return auditParams; case PutBlock: try { - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()) .toString()); } catch (IOException ex) { @@ -862,58 +871,58 @@ private static Map getAuditParams( return auditParams; case GetBlock: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getGetBlock().getBlockID()).toString()); return auditParams; case DeleteBlock: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getDeleteBlock().getBlockID()) .toString()); return auditParams; case ListBlock: - auditParams.put("startLocalID", + auditParams.put(AUDIT_PARAM_START_LOCAL_ID, String.valueOf(msg.getListBlock().getStartLocalID())); - auditParams.put("count", String.valueOf(msg.getListBlock().getCount())); + auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListBlock().getCount())); return auditParams; case ReadChunk: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString()); - auditParams.put("blockDataSize", + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getReadChunk().getChunkData().getLen())); return auditParams; case DeleteChunk: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getDeleteChunk().getBlockID()) .toString()); return auditParams; case WriteChunk: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()) .toString()); - auditParams.put("blockDataSize", + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getWriteChunk().getChunkData().getLen())); return auditParams; case ListChunk: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getListChunk().getBlockID()).toString()); - auditParams.put("prevChunkName", msg.getListChunk().getPrevChunkName()); - auditParams.put("count", String.valueOf(msg.getListChunk().getCount())); + auditParams.put(AUDIT_PARAM_PREV_CHUNKNAME, msg.getListChunk().getPrevChunkName()); + auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListChunk().getCount())); return auditParams; case CompactChunk: return null; //CompactChunk operation case PutSmallFile: try { - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockData.getFromProtoBuf(msg.getPutSmallFile() .getBlock().getBlockData()).toString()); - auditParams.put("blockDataSize", + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getPutSmallFile().getChunkInfo().getLen())); } catch (IOException ex) { if 
(LOG.isTraceEnabled()) { @@ -924,17 +933,17 @@ private static Map getAuditParams( return auditParams; case GetSmallFile: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getGetSmallFile().getBlock().getBlockID()) .toString()); return auditParams; case CloseContainer: - auditParams.put("containerID", containerID); + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); return auditParams; case GetCommittedBlockLength: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getGetCommittedBlockLength().getBlockID()) .toString()); return auditParams; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java index 5242c8686dcb..241abb6f4ae1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.protocol.proto. StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; @@ -68,7 +69,7 @@ public class ClosePipelineCommandHandler implements CommandHandler { */ public ClosePipelineCommandHandler(ConfigurationSource conf, Executor executor) { - this(RatisHelper.newRaftClient(conf), executor); + this(RatisHelper.newRaftClientNoRetry(conf), executor); } /** @@ -105,14 +106,16 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, try { XceiverServerSpi server = ozoneContainer.getWriteChannel(); if (server.isExist(pipelineIdProto)) { - server.removeGroup(pipelineIdProto); if (server instanceof XceiverServerRatis) { // TODO: Refactor Ratis logic to XceiverServerRatis // Propagate the group remove to the other Raft peers in the pipeline XceiverServerRatis ratisServer = (XceiverServerRatis) server; final RaftGroupId raftGroupId = RaftGroupId.valueOf(pipelineID.getId()); - final Collection peers = ratisServer.getRaftPeersInPipeline(pipelineID); final boolean shouldDeleteRatisLogDirectory = ratisServer.getShouldDeleteRatisLogDirectory(); + // This might throw GroupMismatchException if the Ratis group has been closed by other datanodes + final Collection peers = ratisServer.getRaftPeersInPipeline(pipelineID); + // Try to send remove group for the other datanodes first, ignoring GroupMismatchException + // if the Ratis group has been closed in the other datanodes peers.stream() .filter(peer -> !peer.getId().equals(ratisServer.getServer().getId())) .forEach(peer -> { @@ -122,19 +125,34 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, } catch (GroupMismatchException ae) { // ignore silently since this means that the group has been closed by earlier close pipeline // command in another datanode + LOG.debug("Failed to remove group {} for pipeline {} on peer {} since the group has " + + "been removed by earlier close pipeline command handled in another datanode", raftGroupId, + pipelineID, 
peer.getId()); } catch (IOException ioe) { - LOG.warn("Failed to remove group {} for peer {}", raftGroupId, peer.getId(), ioe); + LOG.warn("Failed to remove group {} of pipeline {} on peer {}", + raftGroupId, pipelineID, peer.getId(), ioe); } }); } + // Remove the Ratis group from the current datanode pipeline, might throw GroupMismatchException as + // well. It is a no-op for XceiverServerSpi implementations (e.g. XceiverServerGrpc) + server.removeGroup(pipelineIdProto); LOG.info("Close Pipeline {} command on datanode {}.", pipelineID, dn.getUuidString()); } else { - LOG.debug("Ignoring close pipeline command for pipeline {} " + - "as it does not exist", pipelineID); + LOG.debug("Ignoring close pipeline command for pipeline {} on datanode {} " + + "as it does not exist", pipelineID, dn.getUuidString()); } } catch (IOException e) { - LOG.error("Can't close pipeline {}", pipelineID, e); + Throwable gme = HddsClientUtils.containsException(e, GroupMismatchException.class); + if (gme != null) { + // ignore silently since this means that the group has been closed by earlier close pipeline + // command in another datanode + LOG.debug("The group for pipeline {} on datanode {} has been removed by earlier close " + + "pipeline command handled in another datanode", pipelineID, dn.getUuidString()); + } else { + LOG.error("Can't close pipeline {}", pipelineID, e); + } } finally { long endTime = Time.monotonicNow(); totalTime += endTime - startTime; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index a243b0c7dab1..747749066e3d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -514,7 +514,9 @@ private void markBlocksForDeletionTransaction( throws IOException { int newDeletionBlocks = 0; long containerId = delTX.getContainerID(); - logDeleteTransaction(containerId, containerData, delTX); + if (isDuplicateTransaction(containerId, containerData, delTX, blockDeleteMetrics)) { + return; + } try (DBHandle containerDB = BlockUtils.getDB(containerData, conf)) { DeleteTransactionStore store = (DeleteTransactionStore) containerDB.getStore(); @@ -536,7 +538,9 @@ private void markBlocksForDeletionSchemaV1( KeyValueContainerData containerData, DeletedBlocksTransaction delTX) throws IOException { long containerId = delTX.getContainerID(); - logDeleteTransaction(containerId, containerData, delTX); + if (isDuplicateTransaction(containerId, containerData, delTX, blockDeleteMetrics)) { + return; + } int newDeletionBlocks = 0; try (DBHandle containerDB = BlockUtils.getDB(containerData, conf)) { Table blockDataTable = @@ -626,20 +630,28 @@ private void updateMetaData(KeyValueContainerData containerData, } } - private void logDeleteTransaction(long containerId, - KeyValueContainerData containerData, DeletedBlocksTransaction delTX) { - if (LOG.isDebugEnabled()) { - LOG.debug("Processing Container : {}, DB path : {}, transaction {}", - containerId, containerData.getMetadataPath(), delTX.getTxID()); - } + public static boolean isDuplicateTransaction(long containerId, KeyValueContainerData containerData, + 
DeletedBlocksTransaction delTX, BlockDeletingServiceMetrics metrics) { + boolean duplicate = false; - if (delTX.getTxID() <= containerData.getDeleteTransactionId()) { - blockDeleteMetrics.incOutOfOrderDeleteBlockTransactionCount(); + if (delTX.getTxID() < containerData.getDeleteTransactionId()) { + if (metrics != null) { + metrics.incOutOfOrderDeleteBlockTransactionCount(); + } LOG.info(String.format("Delete blocks for containerId: %d" - + " is either received out of order or retried," - + " %d <= %d", containerId, delTX.getTxID(), + + " is received out of order, %d < %d", containerId, delTX.getTxID(), containerData.getDeleteTransactionId())); + } else if (delTX.getTxID() == containerData.getDeleteTransactionId()) { + duplicate = true; + LOG.info(String.format("Delete blocks with txID %d for containerId: %d" + + " is retried.", delTX.getTxID(), containerId)); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Processing Container : {}, DB path : {}, transaction {}", + containerId, containerData.getMetadataPath(), delTX.getTxID()); + } } + return duplicate; } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index 009e6396e0d2..346b05ebb4c1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -99,11 +99,11 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails, this.id = datanodeDetails.getUuid(); this.datanodeDetails = datanodeDetails; - this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + this.port = conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); - if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { + if (conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { this.port = 0; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java index b776dc903de4..87572768e4af 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.container.common.transport.server.ratis; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.metrics2.MetricsSystem; @@ -132,67 +131,55 @@ public void incNumApplyTransactionsFails() { numApplyTransactionFails.incr(); } - @VisibleForTesting public long getNumWriteStateMachineOps() { return numWriteStateMachineOps.value(); } - @VisibleForTesting public long getNumQueryStateMachineOps() { return numQueryStateMachineOps.value(); } - @VisibleForTesting 
public long getNumApplyTransactionsOps() { return numApplyTransactionOps.value(); } - @VisibleForTesting public long getNumWriteStateMachineFails() { return numWriteStateMachineFails.value(); } - @VisibleForTesting public long getNumWriteDataFails() { return numWriteDataFails.value(); } - @VisibleForTesting public long getNumQueryStateMachineFails() { return numQueryStateMachineFails.value(); } - @VisibleForTesting public long getNumApplyTransactionsFails() { return numApplyTransactionFails.value(); } - @VisibleForTesting public long getNumReadStateMachineFails() { return numReadStateMachineFails.value(); } - @VisibleForTesting public long getNumReadStateMachineMissCount() { return numReadStateMachineMissCount.value(); } - @VisibleForTesting public long getNumReadStateMachineOps() { return numReadStateMachineOps.value(); } - @VisibleForTesting public long getNumBytesWrittenCount() { return numBytesWrittenCount.value(); } - @VisibleForTesting public long getNumBytesCommittedCount() { return numBytesCommittedCount.value(); } - public MutableRate getApplyTransactionLatencyNs() { + MutableRate getApplyTransactionLatencyNs() { return applyTransactionNs; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 30496ce51a02..9eb5b909ccea 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -219,8 +219,8 @@ public ContainerStateMachine(RaftGroupId gid, this.writeChunkFutureMap = new ConcurrentHashMap<>(); applyTransactionCompletionMap = new ConcurrentHashMap<>(); long pendingRequestsBytesLimit = (long)conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); // cache with FIFO eviction, and if element not found, this needs // to be obtained from disk for slow follower @@ -238,13 +238,13 @@ public ContainerStateMachine(RaftGroupId gid, this.container2BCSIDMap = new ConcurrentHashMap<>(); final int numContainerOpExecutors = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); int maxPendingApplyTransactions = conf.getInt( ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, ScmConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); stateMachineHealthy = new AtomicBoolean(true); @@ -432,11 +432,10 @@ public TransactionContext startTransaction(RaftClientRequest request) if (!blockAlreadyFinalized) { // create the log entry proto final WriteChunkRequestProto commitWriteChunkProto = - WriteChunkRequestProto.newBuilder() - .setBlockID(write.getBlockID()) - .setChunkData(write.getChunkData()) + WriteChunkRequestProto.newBuilder(write) // skipping the data field as it is // already set in statemachine data proto + .clearData() .build(); ContainerCommandRequestProto commitContainerCommandProto = ContainerCommandRequestProto diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index fcc611ea3f10..75f0482a6ca7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -110,12 +110,12 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; import static org.apache.ratis.util.Preconditions.assertTrue; /** @@ -189,9 +189,8 @@ private XceiverServerRatis(DatanodeDetails dd, ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); assignPorts(); this.streamEnable = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); - RaftProperties serverProperties = newRaftProperties(); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); this.context = context; this.dispatcher = dispatcher; this.containerController = containerController; @@ -202,6 +201,7 @@ private XceiverServerRatis(DatanodeDetails dd, 
shouldDeleteRatisLogDirectory = ratisServerConfig.shouldDeleteRatisLogDirectory(); + RaftProperties serverProperties = newRaftProperties(); this.server = RaftServer.newBuilder().setServerId(raftPeerId) .setProperties(serverProperties) @@ -217,17 +217,17 @@ private XceiverServerRatis(DatanodeDetails dd, private void assignPorts() { clientPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); if (DatanodeVersion.fromProtoValue(datanodeDetails.getInitialVersion()) .compareTo(SEPARATE_RATIS_PORTS_AVAILABLE) >= 0) { adminPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); serverPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); } else { adminPort = clientPort; serverPort = clientPort; @@ -236,8 +236,8 @@ private void assignPorts() { private int determinePort(String key, int defaultValue) { boolean randomPort = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); return randomPort ? 0 : conf.getInt(key, defaultValue); } @@ -249,14 +249,14 @@ private ContainerStateMachine getStateMachine(RaftGroupId gid) { private void setUpRatisStream(RaftProperties properties) { // set the datastream config if (conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { dataStreamPort = 0; } else { dataStreamPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); } RatisHelper.enableNettyStreaming(properties); NettyConfigKeys.DataStream.setPort(properties, dataStreamPort); @@ -327,8 +327,8 @@ public RaftProperties newRaftProperties() { } long snapshotThreshold = - conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, - OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); + conf.getLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, + OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); RaftServerConfigKeys.Snapshot. setAutoTriggerEnabled(properties, true); RaftServerConfigKeys.Snapshot. 
@@ -338,11 +338,11 @@ public RaftProperties newRaftProperties() { setPendingRequestsLimits(properties); int logQueueNumElements = - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); final long logQueueByteLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setQueueElementLimit( properties, logQueueNumElements); @@ -353,8 +353,8 @@ public RaftProperties newRaftProperties() { false); int purgeGap = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap); //Set the number of Snapshots Retained. @@ -375,12 +375,12 @@ private void setRatisLeaderElectionTimeout(RaftProperties properties) { long duration; TimeUnit leaderElectionMinTimeoutUnit = OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getDuration(), leaderElectionMinTimeoutUnit); final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit); @@ -396,11 +396,11 @@ private void setTimeoutForRetryCache(RaftProperties properties) { TimeUnit timeUnit; long duration; timeUnit = - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getDuration(), timeUnit); final TimeDuration retryCacheTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -410,8 +410,8 @@ private void setTimeoutForRetryCache(RaftProperties properties) { private long setRaftSegmentPreallocatedSize(RaftProperties properties) { final long raftSegmentPreallocatedSize = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setPreallocatedSize(properties, SizeInBytes.valueOf(raftSegmentPreallocatedSize)); @@ -420,23 +420,23 @@ private long setRaftSegmentPreallocatedSize(RaftProperties properties) { private void 
setRaftSegmentAndWriteBufferSize(RaftProperties properties) { final int logAppenderQueueNumElements = conf.getInt( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); final long raftSegmentSize = (long) conf.getStorageSize( - DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, - DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, StorageUnit.BYTES); final long raftSegmentBufferSize = logAppenderQueueByteLimit + 8; assertTrue(raftSegmentBufferSize <= raftSegmentSize, - () -> DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + () -> HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + logAppenderQueueByteLimit - + " must be <= (" + DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + + " must be <= (" + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + " = " + (raftSegmentSize - 8) + ")"); RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, @@ -454,11 +454,11 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true); TimeUnit timeUnit = OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); long duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT .getDuration(), timeUnit); final TimeDuration dataSyncTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -479,7 +479,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs / dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS); int numSyncRetries = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, syncTimeoutRetryDefault); RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties, numSyncRetries); @@ -507,8 +507,8 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { private RpcType setRpcType(RaftProperties properties) { final String rpcType = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); RatisHelper.setRpcType(properties, rpc); return rpc; @@ -517,8 +517,8 @@ private RpcType setRpcType(RaftProperties properties) { private void setPendingRequestsLimits(RaftProperties properties) { long pendingRequestsBytesLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); final int pendingRequestsMegaBytesLimit = HddsUtils.roundupMb(pendingRequestsBytesLimit); @@ -990,9 +990,9 @@ private static List createChunkExecutors( // TODO create single pool with N threads if using non-incremental chunks final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java deleted file mode 100644 index 0a2375b4f44e..000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java +++ /dev/null @@ -1,1295 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.GwtCompatible; -import com.google.common.base.Preconditions; -import static com.google.common.base.Preconditions.checkNotNull; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import com.google.common.util.concurrent.Uninterruptibles; -import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater - .newUpdater; - -import jakarta.annotation.Nullable; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; -import java.util.concurrent.locks.LockSupport; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * An abstract implementation of {@link ListenableFuture}, intended for - * advanced users only. More common ways to create a {@code ListenableFuture} - * include instantiating a {@link SettableFuture}, submitting a task to a - * {@link ListeningExecutorService}, and deriving a {@code Future} from an - * existing one, typically using methods like {@link Futures#transform - * (ListenableFuture, com.google.common.base.Function) Futures.transform} - * and its overloaded versions. - *

- * <p>
This class implements all methods in {@code ListenableFuture}. - * Subclasses should provide a way to set the result of the computation - * through the protected methods {@link #set(Object)}, - * {@link #setFuture(ListenableFuture)} and {@link #setException(Throwable)}. - * Subclasses may also override {@link #interruptTask()}, which will be - * invoked automatically if a call to {@link #cancel(boolean) cancel(true)} - * succeeds in canceling the future. Subclasses should rarely override other - * methods. - */ - -@GwtCompatible(emulated = true) -public abstract class AbstractFuture implements ListenableFuture { - // NOTE: Whenever both tests are cheap and functional, it's faster to use &, - // | instead of &&, || - - private static final boolean GENERATE_CANCELLATION_CAUSES = - Boolean.parseBoolean( - System.getProperty("guava.concurrent.generate_cancellation_cause", - "false")); - - /** - * A less abstract subclass of AbstractFuture. This can be used to optimize - * setFuture by ensuring that {@link #get} calls exactly the implementation - * of {@link AbstractFuture#get}. - */ - abstract static class TrustedFuture extends AbstractFuture { - @Override - public final V get() throws InterruptedException, ExecutionException { - return super.get(); - } - - @Override - public final V get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return super.get(timeout, unit); - } - - @Override - public final boolean isDone() { - return super.isDone(); - } - - @Override - public final boolean isCancelled() { - return super.isCancelled(); - } - - @Override - public final void addListener(Runnable listener, Executor executor) { - super.addListener(listener, executor); - } - - @Override - public final boolean cancel(boolean mayInterruptIfRunning) { - return super.cancel(mayInterruptIfRunning); - } - } - - // Logger to log exceptions caught when running listeners. - private static final Logger LOG = Logger - .getLogger(AbstractFuture.class.getName()); - - // A heuristic for timed gets. If the remaining timeout is less than this, - // spin instead of - // blocking. This value is what AbstractQueuedSynchronizer uses. - private static final long SPIN_THRESHOLD_NANOS = 1000L; - - private static final AtomicHelper ATOMIC_HELPER; - - static { - AtomicHelper helper; - - try { - helper = new UnsafeAtomicHelper(); - } catch (Throwable unsafeFailure) { - // catch absolutely everything and fall through to our 'SafeAtomicHelper' - // The access control checks that ARFU does means the caller class has - // to be AbstractFuture - // instead of SafeAtomicHelper, so we annoyingly define these here - try { - helper = - new SafeAtomicHelper( - newUpdater(Waiter.class, Thread.class, "thread"), - newUpdater(Waiter.class, Waiter.class, "next"), - newUpdater(AbstractFuture.class, Waiter.class, "waiters"), - newUpdater(AbstractFuture.class, Listener.class, "listeners"), - newUpdater(AbstractFuture.class, Object.class, "value")); - } catch (Throwable atomicReferenceFieldUpdaterFailure) { - // Some Android 5.0.x Samsung devices have bugs in JDK reflection APIs - // that cause getDeclaredField to throw a NoSuchFieldException when - // the field is definitely there. - // For these users fallback to a suboptimal implementation, based on - // synchronized. This will be a definite performance hit to those users. 
- LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure); - LOG.log( - Level.SEVERE, "SafeAtomicHelper is broken!", - atomicReferenceFieldUpdaterFailure); - helper = new SynchronizedHelper(); - } - } - ATOMIC_HELPER = helper; - - // Prevent rare disastrous classloading in first call to LockSupport.park. - // See: https://bugs.openjdk.java.net/browse/JDK-8074773 - @SuppressWarnings("unused") - Class ensureLoaded = LockSupport.class; - } - - /** - * Waiter links form a Treiber stack, in the {@link #waiters} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Waiter { - static final Waiter TOMBSTONE = new Waiter(false /* ignored param */); - - @Nullable volatile Thread thread; - @Nullable volatile Waiter next; - - /** - * Constructor for the TOMBSTONE, avoids use of ATOMIC_HELPER in case this - * class is loaded before the ATOMIC_HELPER. Apparently this is possible - * on some android platforms. - */ - Waiter(boolean unused) { - } - - Waiter() { - // avoid volatile write, write is made visible by subsequent CAS on - // waiters field - ATOMIC_HELPER.putThread(this, Thread.currentThread()); - } - - // non-volatile write to the next field. Should be made visible by - // subsequent CAS on waiters field. - void setNext(Waiter next) { - ATOMIC_HELPER.putNext(this, next); - } - - void unpark() { - // This is racy with removeWaiter. The consequence of the race is that - // we may spuriously call unpark even though the thread has already - // removed itself from the list. But even if we did use a CAS, that - // race would still exist (it would just be ever so slightly smaller). - Thread w = thread; - if (w != null) { - thread = null; - LockSupport.unpark(w); - } - } - } - - /** - * Marks the given node as 'deleted' (null waiter) and then scans the list - * to unlink all deleted nodes. This is an O(n) operation in the common - * case (and O(n^2) in the worst), but we are saved by two things. - *

- * <ul>
- *   <li>This is only called when a waiting thread times out or is - * interrupted. Both of which should be rare.
- *   <li>The waiters list should be very short.
- * </ul>
- */ - private void removeWaiter(Waiter node) { - node.thread = null; // mark as 'deleted' - restart: - while (true) { - Waiter pred = null; - Waiter curr = waiters; - if (curr == Waiter.TOMBSTONE) { - return; // give up if someone is calling complete - } - Waiter succ; - while (curr != null) { - succ = curr.next; - if (curr.thread != null) { // we aren't unlinking this node, update - // pred. - pred = curr; - } else if (pred != null) { // We are unlinking this node and it has a - // predecessor. - pred.next = succ; - if (pred.thread == null) { // We raced with another node that - // unlinked pred. Restart. - continue restart; - } - } else if (!ATOMIC_HELPER - .casWaiters(this, curr, succ)) { // We are unlinking head - continue restart; // We raced with an add or complete - } - curr = succ; - } - break; - } - } - - /** - * Listeners also form a stack through the {@link #listeners} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Listener { - static final Listener TOMBSTONE = new Listener(null, null); - final Runnable task; - final Executor executor; - - // writes to next are made visible by subsequent CAS's on the listeners - // field - @Nullable Listener next; - - Listener(Runnable task, Executor executor) { - this.task = task; - this.executor = executor; - } - } - - /** - * A special value to represent {@code null}. - */ - private static final Object NULL = new Object(); - - /** - * A special value to represent failure, when {@link #setException} is - * called successfully. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Failure { - static final Failure FALLBACK_INSTANCE = - new Failure( - new Throwable("Failure occurred while trying to finish a future.") { - @Override - public synchronized Throwable fillInStackTrace() { - return this; // no stack trace - } - }); - final Throwable exception; - - Failure(Throwable exception) { - this.exception = checkNotNull(exception); - } - } - - /** - * A special value to represent cancellation and the 'wasInterrupted' bit. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Cancellation { - final boolean wasInterrupted; - @Nullable final Throwable cause; - - Cancellation(boolean wasInterrupted, @Nullable Throwable cause) { - this.wasInterrupted = wasInterrupted; - this.cause = cause; - } - } - - /** - * A special value that encodes the 'setFuture' state. - */ - @SuppressWarnings("visibilitymodifier") - private static final class SetFuture implements Runnable { - final AbstractFuture owner; - final ListenableFuture future; - - SetFuture(AbstractFuture owner, ListenableFuture future) { - this.owner = owner; - this.future = future; - } - - @Override - public void run() { - if (owner.value != this) { - // nothing to do, we must have been cancelled, don't bother inspecting - // the future. - return; - } - Object valueToSet = getFutureValue(future); - if (ATOMIC_HELPER.casValue(owner, this, valueToSet)) { - complete(owner); - } - } - } - - /** - * This field encodes the current state of the future. - *

- * <p>The valid values are:
- *
- * <ul>
- *   <li>{@code null} initial state, nothing has happened.
- *   <li>{@link Cancellation} terminal state, {@code cancel} was called.
- *   <li>{@link Failure} terminal state, {@code setException} was called.
- *   <li>{@link SetFuture} intermediate state, {@code setFuture} was called.
- *   <li>{@link #NULL} terminal state, {@code set(null)} was called.
- *   <li>Any other non-null value, terminal state, {@code set} was called with - * a non-null argument.
- * </ul>
- */ - private volatile Object value; - - /** - * All listeners. - */ - private volatile Listener listeners; - - /** - * All waiting threads. - */ - private volatile Waiter waiters; - - /** - * Constructor for use by subclasses. - */ - protected AbstractFuture() { - } - - // Gets and Timed Gets - // - // * Be responsive to interruption - // * Don't create Waiter nodes if you aren't going to park, this helps - // reduce contention on the waiters field. - // * Future completion is defined by when #value becomes non-null/non - // SetFuture - // * Future completion can be observed if the waiters field contains a - // TOMBSTONE - - // Timed Get - // There are a few design constraints to consider - // * We want to be responsive to small timeouts, unpark() has non trivial - // latency overheads (I have observed 12 micros on 64 bit linux systems to - // wake up a parked thread). So if the timeout is small we shouldn't park(). - // This needs to be traded off with the cpu overhead of spinning, so we use - // SPIN_THRESHOLD_NANOS which is what AbstractQueuedSynchronizer uses for - // similar purposes. - // * We want to behave reasonably for timeouts of 0 - // * We are more responsive to completion than timeouts. This is because - // parkNanos depends on system scheduling and as such we could either miss - // our deadline, or unpark() could be delayed so that it looks like we - // timed out even though we didn't. For comparison FutureTask respects - // completion preferably and AQS is non-deterministic (depends on where in - // the queue the waiter is). If we wanted to be strict about it, we could - // store the unpark() time in the Waiter node and we could use that to make - // a decision about whether or not we timed out prior to being unparked. - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *

- * <p>
The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get(long timeout, TimeUnit unit) - throws InterruptedException, TimeoutException, ExecutionException { - // NOTE: if timeout < 0, remainingNanos will be < 0 and we will fall into - // the while(true) loop at the bottom and throw a timeoutexception. - long remainingNanos = unit - .toNanos(timeout); // we rely on the implicit null check on unit. - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - // we delay calling nanoTime until we know we will need to either park or - // spin - final long endNanos = remainingNanos > 0 ? System - .nanoTime() + remainingNanos : 0; - long_wait_loop: - if (remainingNanos >= SPIN_THRESHOLD_NANOS) { - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - while (true) { - LockSupport.parkNanos(this, remainingNanos); - // Check interruption first, if we woke up due to interruption - // we need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - - // timed out? - remainingNanos = endNanos - System.nanoTime(); - if (remainingNanos < SPIN_THRESHOLD_NANOS) { - // Remove the waiter, one way or another we are done parking - // this thread. - removeWaiter(node); - break long_wait_loop; // jump down to the busy wait loop - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - // If we get here then we have remainingNanos < SPIN_THRESHOLD_NANOS and - // there is no node on the waiters list - while (remainingNanos > 0) { - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - if (Thread.interrupted()) { - throw new InterruptedException(); - } - remainingNanos = endNanos - System.nanoTime(); - } - throw new TimeoutException(); - } - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *

- * <p>
The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get() throws InterruptedException, ExecutionException { - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - // we are on the stack, now wait for completion. - while (true) { - LockSupport.park(this); - // Check interruption first, if we woke up due to interruption we - // need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - - /** - * Unboxes {@code obj}. Assumes that obj is not {@code null} or a - * {@link SetFuture}. - */ - private V getDoneValue(Object obj) throws ExecutionException { - // While this seems like it might be too branch-y, simple benchmarking - // proves it to be unmeasurable (comparing done AbstractFutures with - // immediateFuture) - if (obj instanceof Cancellation) { - throw cancellationExceptionWithCause( - "Task was cancelled.", ((Cancellation) obj).cause); - } else if (obj instanceof Failure) { - throw new ExecutionException(((Failure) obj).exception); - } else if (obj == NULL) { - return null; - } else { - @SuppressWarnings("unchecked") // this is the only other option - V asV = (V) obj; - return asV; - } - } - - @Override - public boolean isDone() { - final Object localValue = value; - return localValue != null & !(localValue instanceof SetFuture); - } - - @Override - public boolean isCancelled() { - final Object localValue = value; - return localValue instanceof Cancellation; - } - - /** - * {@inheritDoc} - *

- * <p>
If a cancellation attempt succeeds on a {@code Future} that had - * previously been {@linkplain#setFuture set asynchronously}, then the - * cancellation will also be propagated to the delegate {@code Future} that - * was supplied in the {@code setFuture} call. - */ - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - Object localValue = value; - boolean rValue = false; - if (localValue == null | localValue instanceof SetFuture) { - // Try to delay allocating the exception. At this point we may still - // lose the CAS, but it is certainly less likely. - Throwable cause = - GENERATE_CANCELLATION_CAUSES - ? new CancellationException("Future.cancel() was called.") - : null; - Object valueToSet = new Cancellation(mayInterruptIfRunning, cause); - AbstractFuture abstractFuture = this; - while (true) { - if (ATOMIC_HELPER.casValue(abstractFuture, localValue, valueToSet)) { - rValue = true; - // We call interuptTask before calling complete(), which is - // consistent with FutureTask - if (mayInterruptIfRunning) { - abstractFuture.interruptTask(); - } - complete(abstractFuture); - if (localValue instanceof SetFuture) { - // propagate cancellation to the future set in setfuture, this is - // racy, and we don't care if we are successful or not. - ListenableFuture futureToPropagateTo = ((SetFuture) localValue) - .future; - if (futureToPropagateTo instanceof TrustedFuture) { - // If the future is a TrustedFuture then we specifically avoid - // calling cancel() this has 2 benefits - // 1. for long chains of futures strung together with setFuture - // we consume less stack - // 2. we avoid allocating Cancellation objects at every level of - // the cancellation chain - // We can only do this for TrustedFuture, because - // TrustedFuture.cancel is final and does nothing but delegate - // to this method. - AbstractFuture trusted = (AbstractFuture) - futureToPropagateTo; - localValue = trusted.value; - if (localValue == null | localValue instanceof SetFuture) { - abstractFuture = trusted; - continue; // loop back up and try to complete the new future - } - } else { - // not a TrustedFuture, call cancel directly. - futureToPropagateTo.cancel(mayInterruptIfRunning); - } - } - break; - } - // obj changed, reread - localValue = abstractFuture.value; - if (!(localValue instanceof SetFuture)) { - // obj cannot be null at this point, because value can only change - // from null to non-null. So if value changed (and it did since we - // lost the CAS), then it cannot be null and since it isn't a - // SetFuture, then the future must be done and we should exit the loop - break; - } - } - } - return rValue; - } - - /** - * Subclasses can override this method to implement interruption of the - * future's computation. The method is invoked automatically by a - * successful call to {@link #cancel(boolean) cancel(true)}. - *
- *
The default implementation does nothing. - * - * @since 10.0 - */ - protected void interruptTask() { - } - - /** - * Returns true if this future was cancelled with {@code - * mayInterruptIfRunning} set to {@code true}. - * - * @since 14.0 - */ - protected final boolean wasInterrupted() { - final Object localValue = value; - return (localValue instanceof Cancellation) && ((Cancellation) localValue) - .wasInterrupted; - } - - /** - * {@inheritDoc} - * - * @since 10.0 - */ - @Override - public void addListener(Runnable listener, Executor executor) { - checkNotNull(listener, "Runnable was null."); - checkNotNull(executor, "Executor was null."); - Listener oldHead = listeners; - if (oldHead != Listener.TOMBSTONE) { - Listener newNode = new Listener(listener, executor); - do { - newNode.next = oldHead; - if (ATOMIC_HELPER.casListeners(this, oldHead, newNode)) { - return; - } - oldHead = listeners; // re-read - } while (oldHead != Listener.TOMBSTONE); - } - // If we get here then the Listener TOMBSTONE was set, which means the - // future is done, call the listener. - executeListener(listener, executor); - } - - /** - * Sets the result of this {@code Future} unless this {@code Future} has - * already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the {@code - * Future} may have previously been set asynchronously, in which case its - * result may not be known yet. That result, though not yet known, cannot - * be overridden by a call to a {@code set*} method, only by a call to - * {@link #cancel}. - * - * @param value the value to be used as the result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean set(@Nullable V val) { - Object valueToSet = value == null ? NULL : val; - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the failed result of this {@code Future} unless this {@code Future} - * has already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the - * {@code Future} may have previously been set asynchronously, in which case - * its result may not be known yet. That result, though not yet known, - * cannot be overridden by a call to a {@code set*} method, only by a call - * to {@link #cancel}. - * - * @param throwable the exception to be used as the failed result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean setException(Throwable throwable) { - Object valueToSet = new Failure(checkNotNull(throwable)); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the result of this {@code Future} to match the supplied input - * {@code Future} once the supplied {@code Future} is done, unless this - * {@code Future} has already been cancelled or set (including "set - * asynchronously," defined below). - *
- *
If the supplied future is {@linkplain #isDone done} when this method - * is called and the call is accepted, then this future is guaranteed to - * have been completed with the supplied future by the time this method - * returns. If the supplied future is not done and the call is accepted, then - * the future will be set asynchronously. Note that such a result, - * though not yet known, cannot be overridden by a call to a {@code set*} - * method, only by a call to {@link #cancel}. - *
- *
If the call {@code setFuture(delegate)} is accepted and this {@code - * Future} is later cancelled, cancellation will be propagated to {@code - * delegate}. Additionally, any call to {@code setFuture} after any - * cancellation will propagate cancellation to the supplied {@code Future}. - * - * @param future the future to delegate to - * @return true if the attempt was accepted, indicating that the {@code - * Future} was not previously cancelled or set. - * @since 19.0 - */ - @Beta - @SuppressWarnings("deadstore") - protected boolean setFuture(ListenableFuture future) { - checkNotNull(future); - Object localValue = value; - if (localValue == null) { - if (future.isDone()) { - Object val = getFutureValue(future); - if (ATOMIC_HELPER.casValue(this, null, val)) { - complete(this); - return true; - } - return false; - } - SetFuture valueToSet = new SetFuture(this, future); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - // the listener is responsible for calling completeWithFuture, - // directExecutor is appropriate since all we are doing is unpacking - // a completed future which should be fast. - try { - future.addListener(valueToSet, directExecutor()); - } catch (Throwable t) { - // addListener has thrown an exception! SetFuture.run can't throw - // any exceptions so this must have been caused by addListener - // itself. The most likely explanation is a misconfigured mock. Try - // to switch to Failure. - Failure failure; - try { - failure = new Failure(t); - } catch (Throwable oomMostLikely) { - failure = Failure.FALLBACK_INSTANCE; - } - // Note: The only way this CAS could fail is if cancel() has raced - // with us. That is ok. - boolean unused = ATOMIC_HELPER.casValue(this, valueToSet, failure); - } - return true; - } - localValue = value; // we lost the cas, fall through and maybe cancel - } - // The future has already been set to something. If it is cancellation we - // should cancel the incoming future. - if (localValue instanceof Cancellation) { - // we don't care if it fails, this is best-effort. - future.cancel(((Cancellation) localValue).wasInterrupted); - } - return false; - } - - /** - * Returns a value, suitable for storing in the {@link #value} field. From - * the given future, which is assumed to be done. - *
- *
This is approximately the inverse of {@link #getDoneValue(Object)} - */ - private static Object getFutureValue(ListenableFuture future) { - Object valueToSet; - if (future instanceof TrustedFuture) { - // Break encapsulation for TrustedFuture instances since we know that - // subclasses cannot override .get() (since it is final) and therefore - // this is equivalent to calling .get() and unpacking the exceptions - // like we do below (just much faster because it is a single field read - // instead of a read, several branches and possibly creating exceptions). - return ((AbstractFuture) future).value; - } else { - // Otherwise calculate valueToSet by calling .get() - try { - Object v = getDone(future); - valueToSet = v == null ? NULL : v; - } catch (ExecutionException exception) { - valueToSet = new Failure(exception.getCause()); - } catch (CancellationException cancellation) { - valueToSet = new Cancellation(false, cancellation); - } catch (Throwable t) { - valueToSet = new Failure(t); - } - } - return valueToSet; - } - - /** - * Unblocks all threads and runs all listeners. - */ - private static void complete(AbstractFuture future) { - Listener next = null; - outer: - while (true) { - future.releaseWaiters(); - // We call this before the listeners in order to avoid needing to manage - // a separate stack data structure for them. afterDone() should be - // generally fast and only used for cleanup work... but in theory can - // also be recursive and create StackOverflowErrors - future.afterDone(); - // push the current set of listeners onto next - next = future.clearListeners(next); - future = null; - while (next != null) { - Listener curr = next; - next = next.next; - Runnable task = curr.task; - if (task instanceof SetFuture) { - SetFuture setFuture = (SetFuture) task; - // We unwind setFuture specifically to avoid StackOverflowErrors in - // the case of long chains of SetFutures - // Handling this special case is important because there is no way - // to pass an executor to setFuture, so a user couldn't break the - // chain by doing this themselves. It is also potentially common - // if someone writes a recursive Futures.transformAsync transformer. - future = setFuture.owner; - if (future.value == setFuture) { - Object valueToSet = getFutureValue(setFuture.future); - if (ATOMIC_HELPER.casValue(future, setFuture, valueToSet)) { - continue outer; - } - } - // other wise the future we were trying to set is already done. - } else { - executeListener(task, curr.executor); - } - } - break; - } - } - - public static V getDone(Future future) throws ExecutionException { - /* - * We throw IllegalStateException, since the call could succeed later. - * Perhaps we "should" throw IllegalArgumentException, since the call - * could succeed with a different argument. Those exceptions' docs - * suggest that either is acceptable. Google's Java Practices page - * recommends IllegalArgumentException here, in part to keep its - * recommendation simple: Static methods should throw - * IllegalStateException only when they use static state. - * - * - * Why do we deviate here? The answer: We want for fluentFuture.getDone() - * to throw the same exception as Futures.getDone(fluentFuture). - */ - Preconditions.checkState(future.isDone(), "Future was expected to be " + - "done:" + - " %s", future); - return Uninterruptibles.getUninterruptibly(future); - } - - /** - * Callback method that is called exactly once after the future is completed. - *
- *
If {@link #interruptTask} is also run during completion, - * {@link #afterDone} runs after it. - *
- *
The default implementation of this method in {@code AbstractFuture} - * does nothing. This is intended for very lightweight cleanup work, for - * example, timing statistics or clearing fields. - * If your task does anything heavier consider, just using a listener with - * an executor. - * - * @since 20.0 - */ - @Beta - protected void afterDone() { - } - - /** - * If this future has been cancelled (and possibly interrupted), cancels - * (and possibly interrupts) the given future (if available). - *
- *
This method should be used only when this future is completed. It is - * designed to be called from {@code done}. - */ - final void maybePropagateCancellation(@Nullable Future related) { - if (related != null & isCancelled()) { - related.cancel(wasInterrupted()); - } - } - - /** - * Releases all threads in the {@link #waiters} list, and clears the list. - */ - private void releaseWaiters() { - Waiter head; - do { - head = waiters; - } while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE)); - for (Waiter currentWaiter = head; - currentWaiter != null; currentWaiter = currentWaiter.next) { - currentWaiter.unpark(); - } - } - - /** - * Clears the {@link #listeners} list and prepends its contents to {@code - * onto}, least recently added first. - */ - private Listener clearListeners(Listener onto) { - // We need to - // 1. atomically swap the listeners with TOMBSTONE, this is because - // addListener uses that to to synchronize with us - // 2. reverse the linked list, because despite our rather clear contract, - // people depend on us executing listeners in the order they were added - // 3. push all the items onto 'onto' and return the new head of the stack - Listener head; - do { - head = listeners; - } while (!ATOMIC_HELPER.casListeners(this, head, Listener.TOMBSTONE)); - Listener reversedList = onto; - while (head != null) { - Listener tmp = head; - head = head.next; - tmp.next = reversedList; - reversedList = tmp; - } - return reversedList; - } - - /** - * Submits the given runnable to the given {@link Executor} catching and - * logging all {@linkplain RuntimeException runtime exceptions} thrown by - * the executor. - */ - private static void executeListener(Runnable runnable, Executor executor) { - try { - executor.execute(runnable); - } catch (RuntimeException e) { - // Log it and keep going -- bad runnable and/or executor. Don't punish - // the other runnables if we're given a bad one. We only catch - // RuntimeException because we want Errors to propagate up. - LOG.log( - Level.SEVERE, - "RuntimeException while executing runnable " + runnable + " with " + - "executor " + executor, - e); - } - } - - private abstract static class AtomicHelper { - /** - * Non volatile write of the thread to the {@link Waiter#thread} field. - */ - abstract void putThread(Waiter waiter, Thread newValue); - - /** - * Non volatile write of the waiter to the {@link Waiter#next} field. - */ - abstract void putNext(Waiter waiter, Waiter newValue); - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - abstract boolean casWaiters( - AbstractFuture future, Waiter expect, - Waiter update); - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - abstract boolean casListeners( - AbstractFuture future, Listener expect, - Listener update); - - /** - * Performs a CAS operation on the {@link #value} field. - */ - abstract boolean casValue( - AbstractFuture future, Object expect, Object update); - } - - /** - * {@link AtomicHelper} based on {@link sun.misc.Unsafe}. - *
- *
Static initialization of this class will fail if the - * {@link sun.misc.Unsafe} object cannot be accessed. - */ - private static final class UnsafeAtomicHelper extends AtomicHelper { - static final sun.misc.Unsafe UNSAFE; - static final long LISTENERS_OFFSET; - static final long WAITERS_OFFSET; - static final long VALUE_OFFSET; - static final long WAITER_THREAD_OFFSET; - static final long WAITER_NEXT_OFFSET; - - static { - sun.misc.Unsafe unsafe = null; - try { - unsafe = sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) { - try { - unsafe = - AccessController.doPrivileged( - new PrivilegedExceptionAction() { - @Override - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) { - return k.cast(x); - } - } - throw new NoSuchFieldError("the Unsafe"); - } - }); - } catch (PrivilegedActionException e) { - throw new RuntimeException( - "Could not initialize intrinsics", e.getCause()); - } - } - try { - Class abstractFuture = AbstractFuture.class; - WAITERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("waiters")); - LISTENERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("listeners")); - VALUE_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("value")); - WAITER_THREAD_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("thread")); - WAITER_NEXT_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("next")); - UNSAFE = unsafe; - } catch (Exception e) { - throwIfUnchecked(e); - throw new RuntimeException(e); - } - } - - public static void throwIfUnchecked(Throwable throwable) { - checkNotNull(throwable); - if (throwable instanceof RuntimeException) { - throw (RuntimeException) throwable; - } - if (throwable instanceof Error) { - throw (Error) throwable; - } - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - UNSAFE.putObject(waiter, WAITER_THREAD_OFFSET, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - UNSAFE.putObject(waiter, WAITER_NEXT_OFFSET, newValue); - } - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return UNSAFE - .compareAndSwapObject(future, WAITERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return UNSAFE - .compareAndSwapObject(future, LISTENERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #value} field. - */ - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return UNSAFE.compareAndSwapObject(future, VALUE_OFFSET, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}. 
- */ - @SuppressWarnings("visibilitymodifier") - private static final class SafeAtomicHelper extends AtomicHelper { - final AtomicReferenceFieldUpdater waiterThreadUpdater; - final AtomicReferenceFieldUpdater waiterNextUpdater; - final AtomicReferenceFieldUpdater waitersUpdater; - final AtomicReferenceFieldUpdater - listenersUpdater; - final AtomicReferenceFieldUpdater valueUpdater; - - SafeAtomicHelper( - AtomicReferenceFieldUpdater waiterThreadUpdater, - AtomicReferenceFieldUpdater waiterNextUpdater, - AtomicReferenceFieldUpdater waitersUpdater, - AtomicReferenceFieldUpdater listenersUpdater, - AtomicReferenceFieldUpdater valueUpdater) { - this.waiterThreadUpdater = waiterThreadUpdater; - this.waiterNextUpdater = waiterNextUpdater; - this.waitersUpdater = waitersUpdater; - this.listenersUpdater = listenersUpdater; - this.valueUpdater = valueUpdater; - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - waiterThreadUpdater.lazySet(waiter, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiterNextUpdater.lazySet(waiter, newValue); - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return waitersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return listenersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return valueUpdater.compareAndSet(future, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@code synchronized} and volatile writes. - *
- *
This is an implementation of last resort for when certain basic VM - * features are broken (like AtomicReferenceFieldUpdater). - */ - private static final class SynchronizedHelper extends AtomicHelper { - @Override - void putThread(Waiter waiter, Thread newValue) { - waiter.thread = newValue; - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiter.next = newValue; - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - synchronized (future) { - if (future.waiters == expect) { - future.waiters = update; - return true; - } - return false; - } - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - synchronized (future) { - if (future.listeners == expect) { - future.listeners = update; - return true; - } - return false; - } - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - synchronized (future) { - if (future.value == expect) { - future.value = update; - return true; - } - return false; - } - } - } - - private static CancellationException cancellationExceptionWithCause( - @Nullable String message, @Nullable Throwable cause) { - CancellationException exception = new CancellationException(message); - exception.initCause(cause); - return exception; - } - - /** - * Returns an {@link Executor} that runs each task in the thread that invokes - * {@link Executor#execute execute}, as in {@link CallerRunsPolicy}. - *
- * This instance is equivalent to: {@code
-   *   final class DirectExecutor implements Executor {
-   *     public void execute(Runnable r) {
-   *       r.run();
-   *     }
-   *   }}
- */ - public static Executor directExecutor() { - return DirectExecutor.INSTANCE; - } - - /** - * See {@link #directExecutor} for behavioral notes. - */ - private enum DirectExecutor implements Executor { - INSTANCE; - - @Override - public void execute(Runnable command) { - command.run(); - } - - @Override - public String toString() { - return "MoreExecutors.directExecutor()"; - } - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index d8ba919cefb5..d4cdaf2cfe41 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -92,6 +92,7 @@ public class HddsVolume extends StorageVolume { private File dbParentDir; private File deletedContainerDir; private AtomicBoolean dbLoaded = new AtomicBoolean(false); + private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false); /** * Builder for HddsVolume. @@ -257,6 +258,11 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused) VolumeCheckResult result = super.check(unused); DatanodeConfiguration df = getConf().getObject(DatanodeConfiguration.class); + if (isDbLoadFailure()) { + LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " + + "the volume might not have been loaded properly.", getStorageDir()); + return VolumeCheckResult.FAILED; + } if (result != VolumeCheckResult.HEALTHY || !df.getContainerSchemaV3Enabled() || !isDbLoaded()) { return result; @@ -313,6 +319,11 @@ public File getDbParentDir() { return this.dbParentDir; } + @VisibleForTesting + public void setDbParentDir(File dbParentDir) { + this.dbParentDir = dbParentDir; + } + public File getDeletedContainerDir() { return this.deletedContainerDir; } @@ -326,6 +337,10 @@ public boolean isDbLoaded() { return dbLoaded.get(); } + public boolean isDbLoadFailure() { + return dbLoadFailure.get(); + } + public void loadDbStore(boolean readOnly) throws IOException { // DN startup for the first time, not registered yet, // so the DbVolume is not formatted. 
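Note on the HddsVolume hunks above: the new dbLoadFailure flag makes a failed RocksDB initialization surface as a FAILED result on the next volume health check instead of leaving the volume silently serving without a DB. The following is a minimal, self-contained sketch of that pattern only; the class and member names (DbBackedVolumeSketch, initDbStore, CheckResult) are invented for illustration and are not part of this patch.

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the failure-flag pattern: remember that the per-disk DB failed to
// load, and report the volume as failed on the next check.
public class DbBackedVolumeSketch {

  enum CheckResult { HEALTHY, FAILED }

  private final AtomicBoolean dbLoaded = new AtomicBoolean(false);
  private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false);

  void loadDbStore() {
    try {
      initDbStore();              // stand-in for initPerDiskDBStore(...)
      dbLoaded.set(true);
      dbLoadFailure.set(false);
    } catch (Throwable t) {
      // Any failure (not just IOException) marks the volume so the checker can fail it.
      dbLoadFailure.set(true);
    }
  }

  CheckResult check() {
    // A DB load failure overrides an otherwise healthy filesystem check.
    return dbLoadFailure.get() ? CheckResult.FAILED : CheckResult.HEALTHY;
  }

  private void initDbStore() {
    // Placeholder for RocksDB initialization; may throw at runtime.
  }

  public static void main(String[] args) {
    DbBackedVolumeSketch volume = new DbBackedVolumeSketch();
    volume.loadDbStore();
    System.out.println("check() -> " + volume.check());
  }
}

In the real patch the flag is also cleared when the DB store is closed or successfully recreated, so a volume is only failed while the load error is current.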
@@ -363,7 +378,8 @@ public void loadDbStore(boolean readOnly) throws IOException { String containerDBPath = containerDBFile.getAbsolutePath(); try { initPerDiskDBStore(containerDBPath, getConf(), readOnly); - } catch (IOException e) { + } catch (Throwable e) { + dbLoadFailure.set(true); throw new IOException("Can't init db instance under path " + containerDBPath + " for volume " + getStorageID(), e); } @@ -417,9 +433,11 @@ public void createDbStore(MutableVolumeSet dbVolumeSet) throws IOException { try { HddsVolumeUtil.initPerDiskDBStore(containerDBPath, getConf(), false); dbLoaded.set(true); + dbLoadFailure.set(false); LOG.info("SchemaV3 db is created and loaded at {} for volume {}", containerDBPath, getStorageID()); } catch (IOException e) { + dbLoadFailure.set(true); String errMsg = "Can't create db instance under path " + containerDBPath + " for volume " + getStorageID(); LOG.error(errMsg, e); @@ -448,6 +466,7 @@ private void closeDbStore() { .getAbsolutePath(); DatanodeStoreCache.getInstance().removeDB(containerDBPath); dbLoaded.set(false); + dbLoadFailure.set(false); LOG.info("SchemaV3 db is stopped at {} for volume {}", containerDBPath, getStorageID()); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 3c0b6e618ee1..e195b127d499 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -442,12 +442,20 @@ public Map> getVolumeStateMap() { public boolean hasEnoughVolumes() { // Max number of bad volumes allowed, should have at least // 1 good volume + boolean hasEnoughVolumes; if (maxVolumeFailuresTolerated == StorageVolumeChecker.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { - return getVolumesList().size() >= 1; + hasEnoughVolumes = getVolumesList().size() >= 1; } else { - return getFailedVolumesList().size() <= maxVolumeFailuresTolerated; + hasEnoughVolumes = getFailedVolumesList().size() <= maxVolumeFailuresTolerated; } + if (!hasEnoughVolumes) { + LOG.error("Not enough volumes in MutableVolumeSet. 
DatanodeUUID: {}, VolumeType: {}, " + + "MaxVolumeFailuresTolerated: {}, ActiveVolumes: {}, FailedVolumes: {}", + datanodeUuid, volumeType, maxVolumeFailuresTolerated, + getVolumesList().size(), getFailedVolumesList().size()); + } + return hasEnoughVolumes; } public StorageLocationReport[] getStorageReport() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index d9d5a667b30b..b85ac15c54e4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -456,11 +456,6 @@ public long getAvailable() { } - public long getAvailable(SpaceUsageSource precomputedVolumeSpace) { - return volumeInfo.map(info -> info.getAvailable(precomputedVolumeSpace)) - .orElse(0L); - } - public SpaceUsageSource getCurrentUsage() { return volumeInfo.map(VolumeInfo::getCurrentUsage) .orElse(SpaceUsageSource.UNKNOWN); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java index 4917810bd97c..e81fd1008ff6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java @@ -18,6 +18,11 @@ package org.apache.hadoop.ozone.container.common.volume; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.ArrayList; @@ -38,7 +43,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -46,10 +50,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import jakarta.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java index 991f105d15b2..1548b30c9fb6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java @@ -144,8 +144,7 @@ 
public Optional> schedule( final ListenableFuture lf; if (diskCheckTimeout > 0) { - lf = TimeoutFuture - .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS, + lf = Futures.withTimeout(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS, scheduledExecutorService); } else { lf = lfWithoutTimeout; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java deleted file mode 100644 index 42e2ed5758eb..000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import jakarta.annotation.Nullable; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Implementation of {@code Futures#withTimeout}. - *
- *
Future that delegates to another but will finish early (via a - * {@link TimeoutException} wrapped in an {@link ExecutionException}) if the - * specified duration expires. The delegate future is interrupted and - * cancelled if it times out. - */ -final class TimeoutFuture extends AbstractFuture.TrustedFuture { - public static final Logger LOG = LoggerFactory.getLogger( - TimeoutFuture.class); - - static ListenableFuture create( - ListenableFuture delegate, - long time, - TimeUnit unit, - ScheduledExecutorService scheduledExecutor) { - TimeoutFuture result = new TimeoutFuture(delegate); - TimeoutFuture.Fire fire = new TimeoutFuture.Fire(result); - result.timer = scheduledExecutor.schedule(fire, time, unit); - delegate.addListener(fire, directExecutor()); - return result; - } - - /* - * Memory visibility of these fields. There are two cases to consider. - * - * 1. visibility of the writes to these fields to Fire.run: - * - * The initial write to delegateRef is made definitely visible via the - * semantics of addListener/SES.schedule. The later racy write in cancel() - * is not guaranteed to be observed, however that is fine since the - * correctness is based on the atomic state in our base class. The initial - * write to timer is never definitely visible to Fire.run since it is - * assigned after SES.schedule is called. Therefore Fire.run has to check - * for null. However, it should be visible if Fire.run is called by - * delegate.addListener since addListener is called after the assignment - * to timer, and importantly this is the main situation in which we need to - * be able to see the write. - * - * 2. visibility of the writes to an afterDone() call triggered by cancel(): - * - * Since these fields are non-final that means that TimeoutFuture is not - * being 'safely published', thus a motivated caller may be able to expose - * the reference to another thread that would then call cancel() and be - * unable to cancel the delegate. There are a number of ways to solve this, - * none of which are very pretty, and it is currently believed to be a - * purely theoretical problem (since the other actions should supply - * sufficient write-barriers). - */ - - @Nullable private ListenableFuture delegateRef; - @Nullable private Future timer; - - private TimeoutFuture(ListenableFuture delegate) { - this.delegateRef = Preconditions.checkNotNull(delegate); - } - - /** - * A runnable that is called when the delegate or the timer completes. - */ - private static final class Fire implements Runnable { - @Nullable - private TimeoutFuture timeoutFutureRef; - - Fire( - TimeoutFuture timeoutFuture) { - this.timeoutFutureRef = timeoutFuture; - } - - @Override - public void run() { - // If either of these reads return null then we must be after a - // successful cancel or another call to this method. - TimeoutFuture timeoutFuture = timeoutFutureRef; - if (timeoutFuture == null) { - return; - } - ListenableFuture delegate = timeoutFuture.delegateRef; - if (delegate == null) { - return; - } - - /* - * If we're about to complete the TimeoutFuture, we want to release our - * reference to it. Otherwise, we'll pin it (and its result) in memory - * until the timeout task is GCed. (The need to clear our reference to - * the TimeoutFuture is the reason we use a *static* nested class with - * a manual reference back to the "containing" class.) - * - * This has the nice-ish side effect of limiting reentrancy: run() calls - * timeoutFuture.setException() calls run(). 
That reentrancy would - * already be harmless, since timeoutFuture can be set (and delegate - * cancelled) only once. (And "set only once" is important for other - * reasons: run() can still be invoked concurrently in different threads, - * even with the above null checks.) - */ - timeoutFutureRef = null; - if (delegate.isDone()) { - timeoutFuture.setFuture(delegate); - } else { - try { - timeoutFuture.setException( - new TimeoutException("Future timed out: " + delegate)); - } finally { - delegate.cancel(true); - } - } - } - } - - @Override - protected void afterDone() { - maybePropagateCancellation(delegateRef); - - Future localTimer = timer; - // Try to cancel the timer as an optimization. - // timer may be null if this call to run was by the timer task since there - // is no happens-before edge between the assignment to timer and an - // execution of the timer task. - if (localTimer != null) { - localTimer.cancel(false); - } - - delegateRef = null; - timer = null; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index 6ee35ba6b096..af890269255d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -20,11 +20,9 @@ import java.io.File; import java.io.IOException; -import java.util.Collection; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; @@ -33,10 +31,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT; - /** * Stores information about a disk/volume. * @@ -100,13 +94,6 @@ public final class VolumeInfo { // Space usage calculator private final VolumeUsage usage; - // Capacity configured. This is useful when we want to - // limit the visible capacity for tests. If negative, then we just - // query from the filesystem. - private long configuredCapacity; - - private long reservedInBytes; - /** * Builder for VolumeInfo. */ @@ -115,7 +102,6 @@ public static class Builder { private final String rootDir; private SpaceUsageCheckFactory usageCheckFactory; private StorageType storageType; - private long configuredCapacity; public Builder(String root, ConfigurationSource config) { this.rootDir = root; @@ -127,11 +113,6 @@ public Builder storageType(StorageType st) { return this; } - public Builder configuredCapacity(long capacity) { - this.configuredCapacity = capacity; - return this; - } - public Builder usageCheckFactory(SpaceUsageCheckFactory factory) { this.usageCheckFactory = factory; return this; @@ -142,55 +123,6 @@ public VolumeInfo build() throws IOException { } } - private long getReserved(ConfigurationSource conf) { - if (conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT) - && conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED)) { - LOG.error("Both {} and {} are set. Set either one, not both. 
If the " + - "volume matches with volume parameter in former config, it is set " + - "as reserved space. If not it fall backs to the latter config.", - HDDS_DATANODE_DIR_DU_RESERVED, HDDS_DATANODE_DIR_DU_RESERVED_PERCENT); - } - - // 1. If hdds.datanode.dir.du.reserved is set for a volume then make it - // as the reserved bytes. - Collection reserveList = conf.getTrimmedStringCollection( - HDDS_DATANODE_DIR_DU_RESERVED); - for (String reserve : reserveList) { - String[] words = reserve.split(":"); - if (words.length < 2) { - LOG.error("Reserved space should config in pair, but current is {}", - reserve); - continue; - } - - if (words[0].trim().equals(rootDir)) { - try { - StorageSize size = StorageSize.parse(words[1].trim()); - return (long) size.getUnit().toBytes(size.getValue()); - } catch (Exception e) { - LOG.error("Failed to parse StorageSize: {}", words[1].trim(), e); - break; - } - } - } - - // 2. If hdds.datanode.dir.du.reserved not set and - // hdds.datanode.dir.du.reserved.percent is set, fall back to this config. - if (conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT)) { - float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, - HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); - if (0 <= percentage && percentage <= 1) { - return (long) Math.ceil(this.usage.getCapacity() * percentage); - } - //If it comes here then the percentage is not between 0-1. - LOG.error("The value of {} should be between 0 to 1. Defaulting to 0.", - HDDS_DATANODE_DIR_DU_RESERVED_PERCENT); - } - - //Both configs are not set, return 0. - return 0; - } - private VolumeInfo(Builder b) throws IOException { this.rootDir = b.rootDir; @@ -206,9 +138,6 @@ private VolumeInfo(Builder b) throws IOException { this.storageType = (b.storageType != null ? b.storageType : StorageType.DEFAULT); - this.configuredCapacity = (b.configuredCapacity != 0 ? 
- b.configuredCapacity : -1); - SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory; if (usageCheckFactory == null) { usageCheckFactory = SpaceUsageCheckFactory.create(b.conf); @@ -216,16 +145,11 @@ private VolumeInfo(Builder b) throws IOException { SpaceUsageCheckParams checkParams = usageCheckFactory.paramsFor(root); - this.usage = new VolumeUsage(checkParams); - this.reservedInBytes = getReserved(b.conf); - this.usage.setReserved(reservedInBytes); + usage = new VolumeUsage(checkParams, b.conf); } public long getCapacity() { - if (configuredCapacity < 0) { - return Math.max(usage.getCapacity() - reservedInBytes, 0); - } - return configuredCapacity; + return usage.getCapacity(); } /** @@ -236,17 +160,11 @@ public long getCapacity() { * A) avail = capacity - used */ public long getAvailable() { - long avail = getCapacity() - usage.getUsedSpace(); - return Math.max(Math.min(avail, usage.getAvailable()), 0); - } - - public long getAvailable(SpaceUsageSource precomputedValues) { - long avail = precomputedValues.getCapacity() - usage.getUsedSpace(); - return Math.max(Math.min(avail, usage.getAvailable(precomputedValues)), 0); + return usage.getAvailable(); } public SpaceUsageSource getCurrentUsage() { - return usage.snapshot(); + return usage.getCurrentUsage(); } public void incrementUsedSpace(long usedSpace) { @@ -285,8 +203,7 @@ public VolumeUsage getUsageForTesting() { return usage; } - @VisibleForTesting public long getReservedInBytes() { - return reservedInBytes; + return usage.getReservedBytes(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index 18e7354ec1da..e59cab0d539f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -32,15 +32,13 @@ public class VolumeInfoMetrics { private String metricsSourceName = VolumeInfoMetrics.class.getSimpleName(); - private String volumeRootStr; - private HddsVolume volume; + private final HddsVolume volume; /** - * @param identifier Typically, path to volume root. e.g. /data/hdds + * @param identifier Typically, path to volume root. E.g. 
/data/hdds */ public VolumeInfoMetrics(String identifier, HddsVolume ref) { this.metricsSourceName += '-' + identifier; - this.volumeRootStr = identifier; this.volume = ref; init(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index b2a66ba16b4a..d18998821b1e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -18,38 +18,62 @@ package org.apache.hadoop.ozone.container.common.volume; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.fs.CachingSpaceUsageSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; import org.apache.hadoop.hdds.fs.SpaceUsageSource; +import org.apache.ratis.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; +import java.io.IOException; +import java.util.Collection; + import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT; /** * Class that wraps the space df of the Datanode Volumes used by SCM * containers. 
*/ -public class VolumeUsage implements SpaceUsageSource { +public class VolumeUsage { private final CachingSpaceUsageSource source; private boolean shutdownComplete; - private long reservedInBytes; + private final long reservedInBytes; private static final Logger LOG = LoggerFactory.getLogger(VolumeUsage.class); - VolumeUsage(SpaceUsageCheckParams checkParams) { + VolumeUsage(SpaceUsageCheckParams checkParams, ConfigurationSource conf) { source = new CachingSpaceUsageSource(checkParams); + reservedInBytes = getReserved(conf, checkParams.getPath(), source.getCapacity()); + Preconditions.assertTrue(reservedInBytes >= 0, reservedInBytes + " < 0"); start(); // TODO should start only on demand } - @Override + @VisibleForTesting + SpaceUsageSource realUsage() { + return source.snapshot(); + } + public long getCapacity() { - return Math.max(source.getCapacity(), 0); + return getCurrentUsage().getCapacity(); + } + + public long getAvailable() { + return getCurrentUsage().getAvailable(); + } + + public long getUsedSpace() { + return getCurrentUsage().getUsedSpace(); } /** @@ -60,24 +84,15 @@ public long getCapacity() { * remainingReserved * B) avail = fsAvail - Max(reserved - other, 0); */ - @Override - public long getAvailable() { - return source.getAvailable() - getRemainingReserved(); - } + public SpaceUsageSource getCurrentUsage() { + SpaceUsageSource real = realUsage(); - public long getAvailable(SpaceUsageSource precomputedVolumeSpace) { - long available = precomputedVolumeSpace.getAvailable(); - return available - getRemainingReserved(precomputedVolumeSpace); - } - - @Override - public long getUsedSpace() { - return source.getUsedSpace(); - } - - @Override - public SpaceUsageSource snapshot() { - return source.snapshot(); + return reservedInBytes == 0 + ? real + : new SpaceUsageSource.Fixed( + Math.max(real.getCapacity() - reservedInBytes, 0), + Math.max(real.getAvailable() - getRemainingReserved(real), 0), + real.getUsedSpace()); } public void incrementUsedSpace(long usedSpace) { @@ -94,19 +109,10 @@ public void decrementUsedSpace(long reclaimedSpace) { * so there could be that DU value > totalUsed when there are deletes. * @return other used space */ - private long getOtherUsed() { - long totalUsed = source.getCapacity() - source.getAvailable(); - return Math.max(totalUsed - source.getUsedSpace(), 0L); - } - - private long getOtherUsed(SpaceUsageSource precomputedVolumeSpace) { + private static long getOtherUsed(SpaceUsageSource precomputedVolumeSpace) { long totalUsed = precomputedVolumeSpace.getCapacity() - precomputedVolumeSpace.getAvailable(); - return Math.max(totalUsed - source.getUsedSpace(), 0L); - } - - private long getRemainingReserved() { - return Math.max(reservedInBytes - getOtherUsed(), 0L); + return Math.max(totalUsed - precomputedVolumeSpace.getUsedSpace(), 0L); } private long getRemainingReserved( @@ -129,8 +135,8 @@ public void refreshNow() { source.refreshNow(); } - public void setReserved(long reserved) { - this.reservedInBytes = reserved; + public long getReservedBytes() { + return reservedInBytes; } /** @@ -174,4 +180,52 @@ public static boolean hasVolumeEnoughSpace(long volumeAvailableSpace, return (volumeAvailableSpace - volumeCommittedBytesCount) > Math.max(requiredSpace, volumeFreeSpaceToSpare); } + + private static long getReserved(ConfigurationSource conf, String rootDir, + long capacity) { + if (conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT) + && conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED)) { + LOG.error("Both {} and {} are set. 
Set either one, not both. If the " + + "volume matches with volume parameter in former config, it is set " + + "as reserved space. If not it fall backs to the latter config.", + HDDS_DATANODE_DIR_DU_RESERVED, HDDS_DATANODE_DIR_DU_RESERVED_PERCENT); + } + + // 1. If hdds.datanode.dir.du.reserved is set for a volume then make it + // as the reserved bytes. + Collection reserveList = conf.getTrimmedStringCollection( + HDDS_DATANODE_DIR_DU_RESERVED); + for (String reserve : reserveList) { + String[] words = reserve.split(":"); + if (words.length < 2) { + LOG.error("Reserved space should be configured in a pair, but current value is {}", + reserve); + continue; + } + + try { + String path = new File(words[0]).getCanonicalPath(); + if (path.equals(rootDir)) { + StorageSize size = StorageSize.parse(words[1].trim()); + return (long) size.getUnit().toBytes(size.getValue()); + } + } catch (IllegalArgumentException e) { + LOG.error("Failed to parse StorageSize {} from config {}", words[1].trim(), HDDS_DATANODE_DIR_DU_RESERVED, e); + } catch (IOException e) { + LOG.error("Failed to read storage path {} from config {}", words[1].trim(), HDDS_DATANODE_DIR_DU_RESERVED, e); + } + } + + // 2. If hdds.datanode.dir.du.reserved not set then fall back to hdds.datanode.dir.du.reserved.percent, using + // either its set value or default value if it has not been set. + float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, + HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); + if (percentage < 0 || percentage > 1) { + LOG.error("The value of {} should be between 0 to 1. Falling back to default value {}", + HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); + percentage = HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT; + } + + return (long) Math.ceil(capacity * percentage); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCommandInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCommandInfo.java index 648a63be428e..f33a9622d40f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCommandInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCommandInfo.java @@ -17,12 +17,13 @@ */ package org.apache.hadoop.ozone.container.ec.reconstruction; +import com.google.protobuf.ByteString; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand; import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex; -import java.util.Arrays; import java.util.SortedMap; import java.util.TreeMap; import java.util.stream.IntStream; @@ -39,16 +40,14 @@ public class ECReconstructionCommandInfo { private final SortedMap targetNodeMap; private final long containerID; private final ECReplicationConfig ecReplicationConfig; - private final byte[] missingContainerIndexes; + private final ByteString missingContainerIndexes; private final long deadlineMsSinceEpoch; private final long term; public ECReconstructionCommandInfo(ReconstructECContainersCommand cmd) { this.containerID = cmd.getContainerID(); this.ecReplicationConfig = cmd.getEcReplicationConfig(); - 
this.missingContainerIndexes = - Arrays.copyOf(cmd.getMissingContainerIndexes(), - cmd.getMissingContainerIndexes().length); + this.missingContainerIndexes = cmd.getMissingContainerIndexes(); this.deadlineMsSinceEpoch = cmd.getDeadline(); this.term = cmd.getTerm(); @@ -60,7 +59,7 @@ public ECReconstructionCommandInfo(ReconstructECContainersCommand cmd) { targetNodeMap = IntStream.range(0, cmd.getTargetDatanodes().size()) .boxed() .collect(toMap( - i -> (int) missingContainerIndexes[i], + i -> (int) missingContainerIndexes.byteAt(i), i -> cmd.getTargetDatanodes().get(i), (v1, v2) -> v1, TreeMap::new)); } @@ -90,7 +89,7 @@ public String toString() { return reconstructECContainersCommand + ": containerID=" + containerID + ", replication=" + ecReplicationConfig.getReplication() - + ", missingIndexes=" + Arrays.toString(missingContainerIndexes) + + ", missingIndexes=" + StringUtils.bytes2String(missingContainerIndexes.asReadOnlyByteBuffer()) + ", sources=" + sourceNodeMap + ", targets=" + targetNodeMap; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 234439a00c24..8fadd19b67d3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; -import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.utils.IOUtils; @@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.security.token.Token; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,6 +58,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -70,7 +72,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -101,12 +102,14 @@ public class ECReconstructionCoordinator implements Closeable { private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; + private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 5; + private final ECContainerOperationClient containerOperationClient; private final ByteBufferPool byteBufferPool; - private final ExecutorService ecReconstructExecutor; - + private final ExecutorService ecReconstructReadExecutor; + private final MemoizedSupplier ecReconstructWriteExecutor; 
private final BlockInputStreamFactory blockInputStreamFactory; private final TokenHelper tokenHelper; private final ContainerClientMetrics clientMetrics; @@ -123,20 +126,18 @@ public ECReconstructionCoordinator( this.containerOperationClient = new ECContainerOperationClient(conf, certificateClient); this.byteBufferPool = new ElasticByteBufferPool(); - ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat(threadNamePrefix + "ec-reconstruct-reader-TID-%d") - .build(); ozoneClientConfig = conf.getObject(OzoneClientConfig.class); - this.ecReconstructExecutor = - new ThreadPoolExecutor(EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, - ozoneClientConfig.getEcReconstructStripeReadPoolLimit(), - 60, - TimeUnit.SECONDS, - new SynchronousQueue<>(), - threadFactory, - new ThreadPoolExecutor.CallerRunsPolicy()); + this.ecReconstructReadExecutor = createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, + ozoneClientConfig.getEcReconstructStripeReadPoolLimit(), + threadNamePrefix + "ec-reconstruct-reader-TID-%d"); + this.ecReconstructWriteExecutor = MemoizedSupplier.valueOf( + () -> createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE, + ozoneClientConfig.getEcReconstructStripeWritePoolLimit(), + threadNamePrefix + "ec-reconstruct-writer-TID-%d")); this.blockInputStreamFactory = BlockInputStreamFactoryImpl - .getInstance(byteBufferPool, () -> ecReconstructExecutor); + .getInstance(byteBufferPool, () -> ecReconstructReadExecutor); tokenHelper = new TokenHelper(new SecurityConfig(conf), secretKeyClient); this.clientMetrics = ContainerClientMetrics.acquire(); this.metrics = metrics; @@ -232,7 +233,7 @@ private ECBlockOutputStream getECBlockOutputStream( containerOperationClient.singleNodePipeline(datanodeDetails, repConfig, replicaIndex), BufferPool.empty(), ozoneClientConfig, - blockLocationInfo.getToken(), clientMetrics, streamBufferArgs); + blockLocationInfo.getToken(), clientMetrics, streamBufferArgs, ecReconstructWriteExecutor); } @VisibleForTesting @@ -247,100 +248,105 @@ public void reconstructECBlockGroup(BlockLocationInfo blockLocationInfo, int dataLocs = ECBlockInputStreamProxy .expectedDataLocations(repConfig, safeBlockGroupLength); List toReconstructIndexes = new ArrayList<>(); + List notReconstructIndexes = new ArrayList<>(); for (Integer index : missingContainerIndexes) { if (index <= dataLocs || index > repConfig.getData()) { toReconstructIndexes.add(index); + } else { + // Don't need to be reconstructed, but we do need a stream to write + // the block data to. + notReconstructIndexes.add(index); } - // else padded indexes. - } - - // Looks like we don't need to reconstruct any missing blocks in this block - // group. The reason for this should be block group had only padding blocks - // in the missing locations. - if (toReconstructIndexes.size() == 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("Skipping the reconstruction for the block: " - + blockLocationInfo.getBlockID() + ". 
In the missing locations: " - + missingContainerIndexes - + ", this block group has only padded blocks."); - } - return; } + OzoneClientConfig clientConfig = this.ozoneClientConfig; + clientConfig.setChecksumVerify(true); try (ECBlockReconstructedStripeInputStream sis = new ECBlockReconstructedStripeInputStream( - repConfig, blockLocationInfo, true, + repConfig, blockLocationInfo, this.containerOperationClient.getXceiverClientManager(), null, this.blockInputStreamFactory, byteBufferPool, - this.ecReconstructExecutor)) { + this.ecReconstructReadExecutor, + clientConfig)) { ECBlockOutputStream[] targetBlockStreams = new ECBlockOutputStream[toReconstructIndexes.size()]; + ECBlockOutputStream[] emptyBlockStreams = + new ECBlockOutputStream[notReconstructIndexes.size()]; ByteBuffer[] bufs = new ByteBuffer[toReconstructIndexes.size()]; try { + // Create streams and buffers for all indexes that need reconstructed for (int i = 0; i < toReconstructIndexes.size(); i++) { int replicaIndex = toReconstructIndexes.get(i); - DatanodeDetails datanodeDetails = - targetMap.get(replicaIndex); - targetBlockStreams[i] = getECBlockOutputStream(blockLocationInfo, - datanodeDetails, repConfig, replicaIndex - ); + DatanodeDetails datanodeDetails = targetMap.get(replicaIndex); + targetBlockStreams[i] = getECBlockOutputStream(blockLocationInfo, datanodeDetails, repConfig, replicaIndex); bufs[i] = byteBufferPool.getBuffer(false, repConfig.getEcChunkSize()); - // Make sure it's clean. Don't want to reuse the erroneously returned - // buffers from the pool. bufs[i].clear(); } + // Then create a stream for all indexes that don't need reconstructed, but still need a stream to + // write the empty block data to. + for (int i = 0; i < notReconstructIndexes.size(); i++) { + int replicaIndex = notReconstructIndexes.get(i); + DatanodeDetails datanodeDetails = targetMap.get(replicaIndex); + emptyBlockStreams[i] = getECBlockOutputStream(blockLocationInfo, datanodeDetails, repConfig, replicaIndex); + } - sis.setRecoveryIndexes(toReconstructIndexes.stream().map(i -> (i - 1)) - .collect(Collectors.toSet())); - long length = safeBlockGroupLength; - while (length > 0) { - int readLen; - try { - readLen = sis.recoverChunks(bufs); - Set failedIndexes = sis.getFailedIndexes(); - if (!failedIndexes.isEmpty()) { - // There was a problem reading some of the block indexes, but we - // did not get an exception as there must have been spare indexes - // to try and recover from. Therefore we should log out the block - // group details in the same way as for the exception case below. + if (toReconstructIndexes.size() > 0) { + sis.setRecoveryIndexes(toReconstructIndexes.stream().map(i -> (i - 1)) + .collect(Collectors.toSet())); + long length = safeBlockGroupLength; + while (length > 0) { + int readLen; + try { + readLen = sis.recoverChunks(bufs); + Set failedIndexes = sis.getFailedIndexes(); + if (!failedIndexes.isEmpty()) { + // There was a problem reading some of the block indexes, but we + // did not get an exception as there must have been spare indexes + // to try and recover from. Therefore we should log out the block + // group details in the same way as for the exception case below. 
+ logBlockGroupDetails(blockLocationInfo, repConfig, + blockDataGroup); + } + } catch (IOException e) { + // When we see exceptions here, it could be due to some transient + // issue that causes the block read to fail when reconstructing it, + // but we have seen issues where the containers don't have the + // blocks they appear they should have, or the block chunks are the + // wrong length etc. In order to debug these sort of cases, if we + // get an error, we will log out the details about the block group + // length on each source, along with their chunk list and chunk + // lengths etc. logBlockGroupDetails(blockLocationInfo, repConfig, blockDataGroup); + throw e; } - } catch (IOException e) { - // When we see exceptions here, it could be due to some transient - // issue that causes the block read to fail when reconstructing it, - // but we have seen issues where the containers don't have the - // blocks they appear they should have, or the block chunks are the - // wrong length etc. In order to debug these sort of cases, if we - // get an error, we will log out the details about the block group - // length on each source, along with their chunk list and chunk - // lengths etc. - logBlockGroupDetails(blockLocationInfo, repConfig, - blockDataGroup); - throw e; - } - // TODO: can be submitted in parallel - for (int i = 0; i < bufs.length; i++) { - CompletableFuture - future = targetBlockStreams[i].write(bufs[i]); - checkFailures(targetBlockStreams[i], future); - bufs[i].clear(); + // TODO: can be submitted in parallel + for (int i = 0; i < bufs.length; i++) { + if (bufs[i].remaining() != 0) { + // If the buffer is empty, we don't need to write it as it will cause + // an empty chunk to be added to the end of the block. + CompletableFuture + future = targetBlockStreams[i].write(bufs[i]); + checkFailures(targetBlockStreams[i], future); + } + bufs[i].clear(); + } + length -= readLen; } - length -= readLen; } - - for (ECBlockOutputStream targetStream : targetBlockStreams) { - targetStream.executePutBlock(true, true, - blockLocationInfo.getLength(), blockDataGroup); - checkFailures(targetStream, - targetStream.getCurrentPutBlkResponseFuture()); + List allStreams = new ArrayList<>(Arrays.asList(targetBlockStreams)); + allStreams.addAll(Arrays.asList(emptyBlockStreams)); + for (ECBlockOutputStream targetStream : allStreams) { + targetStream.executePutBlock(true, true, blockLocationInfo.getLength(), blockDataGroup); + checkFailures(targetStream, targetStream.getCurrentPutBlkResponseFuture()); } } finally { for (ByteBuffer buf : bufs) { byteBufferPool.putBuffer(buf); } IOUtils.cleanupWithLogger(LOG, targetBlockStreams); + IOUtils.cleanupWithLogger(LOG, emptyBlockStreams); } } } @@ -457,6 +463,9 @@ public void close() throws IOException { if (containerOperationClient != null) { containerOperationClient.close(); } + if (ecReconstructWriteExecutor.isInitialized()) { + ecReconstructWriteExecutor.get().shutdownNow(); + } } private Pipeline rebuildInputPipeline(ECReplicationConfig repConfig, @@ -590,4 +599,12 @@ OptionalLong getTermOfLeaderSCM() { .map(StateContext::getTermOfLeaderSCM) .orElse(OptionalLong.empty()); } + + private static ExecutorService createThreadPoolExecutor( + int corePoolSize, int maximumPoolSize, String threadNameFormat) { + return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, + 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); + } } diff --git 
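Editor's sketch (not part of the patch): the hunks above split the single EC reconstruction executor into a dedicated read pool and a lazily-created write pool, so the write pool only comes into existence once a reconstruction actually opens output streams, and close() only shuts down what was built. The stand-alone illustration below shows that pattern in isolation; it assumes Ratis' MemoizedSupplier and Guava's ThreadFactoryBuilder, and the pool bounds and thread-name format are placeholders rather than Ozone's configured values.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.ratis.util.MemoizedSupplier;

public final class LazyWriteExecutorSketch {

  // Built on first get(); the pool bounds here are illustrative placeholders.
  private final MemoizedSupplier<ExecutorService> writeExecutor =
      MemoizedSupplier.valueOf(() -> newPool(0, 10, "ec-reconstruct-writer-TID-%d"));

  ExecutorService writeExecutor() {
    return writeExecutor.get();
  }

  void close() {
    // Mirror the close() change above: only shut down a pool that was actually created.
    if (writeExecutor.isInitialized()) {
      writeExecutor.get().shutdownNow();
    }
  }

  private static ExecutorService newPool(int core, int max, String nameFormat) {
    // SynchronousQueue holds no tasks; with CallerRunsPolicy, a submit that finds
    // every worker busy runs on the submitting thread, which throttles the producer.
    return new ThreadPoolExecutor(core, max, 60, TimeUnit.SECONDS,
        new SynchronousQueue<>(),
        new ThreadFactoryBuilder().setNameFormat(nameFormat).build(),
        new ThreadPoolExecutor.CallerRunsPolicy());
  }
}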
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index ab78c6055cdf..70539111fb99 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdfs.util.Canceler; @@ -45,7 +46,6 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; -import java.util.Arrays; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; @@ -345,7 +345,7 @@ private ScanResult scanBlock(BlockData block, DataTransferThrottler throttler, File chunkFile; try { chunkFile = layout.getChunkFile(onDiskContainerData, - block.getBlockID(), ChunkInfo.getFromProtoBuf(chunk)); + block.getBlockID(), chunk.getChunkName()); } catch (IOException ex) { return ScanResult.unhealthy( ScanResult.FailureType.MISSING_CHUNK_FILE, @@ -421,8 +421,8 @@ private static ScanResult verifyChecksum(BlockData block, " for block %s", ChunkInfo.getFromProtoBuf(chunk), i, - Arrays.toString(expected.toByteArray()), - Arrays.toString(actual.toByteArray()), + StringUtils.bytes2Hex(expected.asReadOnlyByteBuffer()), + StringUtils.bytes2Hex(actual.asReadOnlyByteBuffer()), block.getBlockID()); return ScanResult.unhealthy( ScanResult.FailureType.CORRUPT_CHUNK, chunkFile, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java index 2678d04dfe86..03050308a946 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java @@ -17,14 +17,13 @@ */ package org.apache.hadoop.ozone.container.keyvalue; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import com.google.gson.JsonPrimitive; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.JsonNodeFactory; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; @@ -181,14 +180,10 @@ public String process(ContainerData containerData, DatanodeStore store, return null; } - JsonObject containerJson = inspectContainer(kvData, store); + ObjectNode containerJson = inspectContainer(kvData, store); boolean correct = checkAndRepair(containerJson, 
kvData, store); - Gson gson = new GsonBuilder() - .setPrettyPrinting() - .serializeNulls() - .create(); - String jsonReport = gson.toJson(containerJson); + String jsonReport = JsonUtils.toJsonStringWIthIndent(containerJson); if (log != null) { if (correct) { log.trace(jsonReport); @@ -196,40 +191,38 @@ public String process(ContainerData containerData, DatanodeStore store, log.error(jsonReport); } } + return jsonReport; } - static JsonObject inspectContainer(KeyValueContainerData containerData, + static ObjectNode inspectContainer(KeyValueContainerData containerData, DatanodeStore store) { - JsonObject containerJson = new JsonObject(); + ObjectNode containerJson = JsonUtils.createObjectNode(null); try { // Build top level container properties. - containerJson.addProperty("containerID", containerData.getContainerID()); + containerJson.put("containerID", containerData.getContainerID()); String schemaVersion = containerData.getSchemaVersion(); - containerJson.addProperty("schemaVersion", schemaVersion); - containerJson.addProperty("containerState", - containerData.getState().toString()); - containerJson.addProperty("currentDatanodeID", + containerJson.put("schemaVersion", schemaVersion); + containerJson.put("containerState", containerData.getState().toString()); + containerJson.put("currentDatanodeID", containerData.getVolume().getDatanodeUuid()); - containerJson.addProperty("originDatanodeID", - containerData.getOriginNodeId()); + containerJson.put("originDatanodeID", containerData.getOriginNodeId()); // Build DB metadata values. - Table metadataTable = store.getMetadataTable(); - JsonObject dBMetadata = getDBMetadataJson(metadataTable, containerData); - containerJson.add("dBMetadata", dBMetadata); + // Assuming getDBMetadataJson and getAggregateValues methods return ObjectNode and are refactored to use Jackson + ObjectNode dBMetadata = getDBMetadataJson(store.getMetadataTable(), containerData); + containerJson.set("dBMetadata", dBMetadata); // Build aggregate values. - JsonObject aggregates = getAggregateValues(store, - containerData, schemaVersion); - containerJson.add("aggregates", aggregates); + ObjectNode aggregates = getAggregateValues(store, containerData, schemaVersion); + containerJson.set("aggregates", aggregates); // Build info about chunks directory. 
- JsonObject chunksDirectory = - getChunksDirectoryJson(new File(containerData.getChunksPath())); - containerJson.add("chunksDirectory", chunksDirectory); + // Assuming getChunksDirectoryJson method returns ObjectNode and is refactored to use Jackson + ObjectNode chunksDirectory = getChunksDirectoryJson(new File(containerData.getChunksPath())); + containerJson.set("chunksDirectory", chunksDirectory); } catch (IOException ex) { LOG.error("Inspecting container {} failed", containerData.getContainerID(), ex); @@ -238,28 +231,29 @@ static JsonObject inspectContainer(KeyValueContainerData containerData, return containerJson; } - static JsonObject getDBMetadataJson(Table metadataTable, + static ObjectNode getDBMetadataJson(Table metadataTable, KeyValueContainerData containerData) throws IOException { - JsonObject dBMetadata = new JsonObject(); + ObjectNode dBMetadata = JsonUtils.createObjectNode(null); - dBMetadata.addProperty(OzoneConsts.BLOCK_COUNT, + dBMetadata.put(OzoneConsts.BLOCK_COUNT, metadataTable.get(containerData.getBlockCountKey())); - dBMetadata.addProperty(OzoneConsts.CONTAINER_BYTES_USED, + dBMetadata.put(OzoneConsts.CONTAINER_BYTES_USED, metadataTable.get(containerData.getBytesUsedKey())); - dBMetadata.addProperty(OzoneConsts.PENDING_DELETE_BLOCK_COUNT, + dBMetadata.put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT, metadataTable.get(containerData.getPendingDeleteBlockCountKey())); - dBMetadata.addProperty(OzoneConsts.DELETE_TRANSACTION_KEY, + dBMetadata.put(OzoneConsts.DELETE_TRANSACTION_KEY, metadataTable.get(containerData.getLatestDeleteTxnKey())); - dBMetadata.addProperty(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, + dBMetadata.put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, metadataTable.get(containerData.getBcsIdKey())); return dBMetadata; } - static JsonObject getAggregateValues(DatanodeStore store, + static ObjectNode getAggregateValues(DatanodeStore store, KeyValueContainerData containerData, String schemaVersion) throws IOException { - JsonObject aggregates = new JsonObject(); + + ObjectNode aggregates = JsonUtils.createObjectNode(null); long usedBytesTotal = 0; long blockCountTotal = 0; @@ -308,19 +302,19 @@ static JsonObject getAggregateValues(DatanodeStore store, "container schema " + schemaVersion); } - aggregates.addProperty("blockCount", blockCountTotal); - aggregates.addProperty("usedBytes", usedBytesTotal); + aggregates.put("blockCount", blockCountTotal); + aggregates.put("usedBytes", usedBytesTotal); pendingDelete.addToJson(aggregates); return aggregates; } - static JsonObject getChunksDirectoryJson(File chunksDir) throws IOException { - JsonObject chunksDirectory = new JsonObject(); + static ObjectNode getChunksDirectoryJson(File chunksDir) throws IOException { + ObjectNode chunksDirectory = JsonUtils.createObjectNode(null); - chunksDirectory.addProperty("path", chunksDir.getAbsolutePath()); + chunksDirectory.put("path", chunksDir.getAbsolutePath()); boolean chunksDirPresent = FileUtils.isDirectory(chunksDir); - chunksDirectory.addProperty("present", chunksDirPresent); + chunksDirectory.put("present", chunksDirPresent); long fileCount = 0; if (chunksDirPresent) { @@ -328,43 +322,37 @@ static JsonObject getChunksDirectoryJson(File chunksDir) throws IOException { fileCount = stream.count(); } } - chunksDirectory.addProperty("fileCount", fileCount); + chunksDirectory.put("fileCount", fileCount); return chunksDirectory; } - private boolean checkAndRepair(JsonObject parent, + private boolean checkAndRepair(ObjectNode parent, KeyValueContainerData containerData, DatanodeStore store) { 
- JsonArray errors = new JsonArray(); + ArrayNode errors = JsonUtils.createArrayNode(); boolean passed = true; Table metadataTable = store.getMetadataTable(); - final JsonObject dBMetadata = parent.getAsJsonObject("dBMetadata"); - final JsonObject aggregates = parent.getAsJsonObject("aggregates"); + ObjectNode dBMetadata = (ObjectNode) parent.get("dBMetadata"); + ObjectNode aggregates = (ObjectNode) parent.get("aggregates"); // Check and repair block count. - JsonElement blockCountDB = parent.getAsJsonObject("dBMetadata") - .get(OzoneConsts.BLOCK_COUNT); - - JsonElement blockCountAggregate = parent.getAsJsonObject("aggregates") - .get("blockCount"); + JsonNode blockCountDB = dBMetadata.get(OzoneConsts.BLOCK_COUNT); + JsonNode blockCountAggregate = aggregates.get("blockCount"); // If block count is absent from the DB, it is only an error if there are // a non-zero amount of block keys in the DB. - long blockCountDBLong = 0; - if (!blockCountDB.isJsonNull()) { - blockCountDBLong = blockCountDB.getAsLong(); - } + long blockCountDBLong = blockCountDB.isNull() ? 0 : blockCountDB.asLong(); - if (blockCountDBLong != blockCountAggregate.getAsLong()) { + if (blockCountDBLong != blockCountAggregate.asLong()) { passed = false; BooleanSupplier keyRepairAction = () -> { boolean repaired = false; try { metadataTable.put(containerData.getBlockCountKey(), - blockCountAggregate.getAsLong()); + blockCountAggregate.asLong()); repaired = true; } catch (IOException ex) { LOG.error("Error repairing block count for container {}.", @@ -373,33 +361,31 @@ private boolean checkAndRepair(JsonObject parent, return repaired; }; - JsonObject blockCountError = buildErrorAndRepair("dBMetadata." + + ObjectNode blockCountError = buildErrorAndRepair("dBMetadata." + OzoneConsts.BLOCK_COUNT, blockCountAggregate, blockCountDB, keyRepairAction); errors.add(blockCountError); } // Check and repair used bytes. - JsonElement usedBytesDB = parent.getAsJsonObject("dBMetadata") - .get(OzoneConsts.CONTAINER_BYTES_USED); - JsonElement usedBytesAggregate = parent.getAsJsonObject("aggregates") - .get("usedBytes"); + JsonNode usedBytesDB = parent.path("dBMetadata").path(OzoneConsts.CONTAINER_BYTES_USED); + JsonNode usedBytesAggregate = parent.path("aggregates").path("usedBytes"); // If used bytes is absent from the DB, it is only an error if there is // a non-zero aggregate of used bytes among the block keys. long usedBytesDBLong = 0; - if (!usedBytesDB.isJsonNull()) { - usedBytesDBLong = usedBytesDB.getAsLong(); + if (!usedBytesDB.isNull()) { + usedBytesDBLong = usedBytesDB.asLong(); } - if (usedBytesDBLong != usedBytesAggregate.getAsLong()) { + if (usedBytesDBLong != usedBytesAggregate.asLong()) { passed = false; BooleanSupplier keyRepairAction = () -> { boolean repaired = false; try { metadataTable.put(containerData.getBytesUsedKey(), - usedBytesAggregate.getAsLong()); + usedBytesAggregate.asLong()); repaired = true; } catch (IOException ex) { LOG.error("Error repairing used bytes for container {}.", @@ -408,18 +394,16 @@ private boolean checkAndRepair(JsonObject parent, return repaired; }; - JsonObject usedBytesError = buildErrorAndRepair("dBMetadata." + - OzoneConsts.CONTAINER_BYTES_USED, usedBytesAggregate, usedBytesDB, - keyRepairAction); + ObjectNode usedBytesError = buildErrorAndRepair("dBMetadata." + + OzoneConsts.CONTAINER_BYTES_USED, usedBytesAggregate, usedBytesDB, keyRepairAction); errors.add(usedBytesError); } // check and repair if db delete count mismatches delete transaction count. 
- final JsonElement pendingDeleteCountDB = dBMetadata.get( + JsonNode pendingDeleteCountDB = dBMetadata.path( OzoneConsts.PENDING_DELETE_BLOCK_COUNT); final long dbDeleteCount = jsonToLong(pendingDeleteCountDB); - final JsonElement pendingDeleteCountAggregate - = aggregates.get(PendingDelete.COUNT); + final JsonNode pendingDeleteCountAggregate = aggregates.path(PendingDelete.COUNT); final long deleteTransactionCount = jsonToLong(pendingDeleteCountAggregate); if (dbDeleteCount != deleteTransactionCount) { passed = false; @@ -437,17 +421,15 @@ private boolean checkAndRepair(JsonObject parent, return false; }; - final JsonObject deleteCountError = buildErrorAndRepair( + final ObjectNode deleteCountError = buildErrorAndRepair( "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_COUNT, - pendingDeleteCountAggregate, pendingDeleteCountDB, - deleteCountRepairAction); + pendingDeleteCountAggregate, pendingDeleteCountDB, deleteCountRepairAction); errors.add(deleteCountError); } // check and repair chunks dir. - JsonElement chunksDirPresent = parent.getAsJsonObject("chunksDirectory") - .get("present"); - if (!chunksDirPresent.getAsBoolean()) { + JsonNode chunksDirPresent = parent.path("chunksDirectory").path("present"); + if (!chunksDirPresent.asBoolean()) { passed = false; BooleanSupplier dirRepairAction = () -> { @@ -463,32 +445,32 @@ private boolean checkAndRepair(JsonObject parent, return repaired; }; - JsonObject chunksDirError = buildErrorAndRepair("chunksDirectory.present", - new JsonPrimitive(true), chunksDirPresent, dirRepairAction); + ObjectNode chunksDirError = buildErrorAndRepair("chunksDirectory.present", + JsonNodeFactory.instance.booleanNode(true), chunksDirPresent, dirRepairAction); errors.add(chunksDirError); } - parent.addProperty("correct", passed); - parent.add("errors", errors); + parent.put("correct", passed); + parent.set("errors", errors); return passed; } - static long jsonToLong(JsonElement e) { - return e == null || e.isJsonNull() ? 0 : e.getAsLong(); + private static long jsonToLong(JsonNode e) { + return e == null || e.isNull() ? 
0 : e.asLong(); } - private JsonObject buildErrorAndRepair(String property, JsonElement expected, - JsonElement actual, BooleanSupplier repairAction) { - JsonObject error = new JsonObject(); - error.addProperty("property", property); - error.add("expected", expected); - error.add("actual", actual); + private ObjectNode buildErrorAndRepair(String property, JsonNode expected, + JsonNode actual, BooleanSupplier repairAction) { + ObjectNode error = JsonUtils.createObjectNode(null); + error.put("property", property); + error.set("expected", expected); + error.set("actual", actual); boolean repaired = false; if (mode == Mode.REPAIR) { repaired = repairAction.getAsBoolean(); } - error.addProperty("repaired", repaired); + error.put("repaired", repaired); return error; } @@ -505,9 +487,9 @@ static class PendingDelete { this.bytes = bytes; } - void addToJson(JsonObject json) { - json.addProperty(COUNT, count); - json.addProperty(BYTES, bytes); + void addToJson(ObjectNode json) { + json.put(COUNT, count); + json.put(BYTES, bytes); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 59009ef9dfef..05979d85fac3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -103,6 +103,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.PUT_SMALL_FILE_ERROR; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockDataResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockLengthResponse; +import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getEchoResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getFinalizeBlockResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getGetSmallFileResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getListBlockResponse; @@ -111,6 +112,7 @@ import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadContainerResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getSuccessResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getSuccessResponseBuilder; +import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getWriteChunkResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.malformedRequest; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.putBlockResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; @@ -167,9 +169,9 @@ public KeyValueHandler(ConfigurationSource config, // Requests. 
final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); containerCreationLocks = Striped.lazyWeakLock( @@ -279,6 +281,8 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, return handler.handleGetCommittedBlockLength(request, kvContainer); case FinalizeBlock: return handler.handleFinalizeBlock(request, kvContainer); + case Echo: + return handler.handleEcho(request, kvContainer); default: return null; } @@ -611,6 +615,11 @@ ContainerCommandResponseProto handleFinalizeBlock( return getFinalizeBlockResponse(request, responseData); } + ContainerCommandResponseProto handleEcho( + ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + return getEchoResponse(request); + } + /** * Handle Get Block operation. Calls BlockManager to process the request. */ @@ -772,11 +781,14 @@ ContainerCommandResponseProto handleReadChunk( data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext); + LOG.debug("read chunk from block {} chunk {}", blockID, chunkInfo); // Validate data only if the read chunk is issued by Ratis for its // internal logic. // For client reads, the client is expected to validate. if (DispatcherContext.op(dispatcherContext).readFromTmpFile()) { validateChunkChecksumData(data, chunkInfo); + metrics.incBytesReadStateMachine(chunkInfo.getLen()); + metrics.incNumReadStateMachine(); } metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen()); } catch (StorageContainerException ex) { @@ -809,7 +821,7 @@ private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info) throws StorageContainerException { if (validateChunkChecksumData) { try { - Checksum.verifyChecksum(data, info.getChecksumData(), 0); + Checksum.verifyChecksum(data.duplicate(data.position(), data.limit()), info.getChecksumData(), 0); } catch (OzoneChecksumException ex) { throw ChunkUtils.wrapInStorageContainerException(ex); } @@ -831,6 +843,7 @@ ContainerCommandResponseProto handleWriteChunk( return malformedRequest(request); } + ContainerProtos.BlockData blockDataProto = null; try { checkContainerOpen(kvContainer); @@ -854,6 +867,28 @@ ContainerCommandResponseProto handleWriteChunk( chunkManager .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); + final boolean isCommit = dispatcherContext.getStage().isCommit(); + if (isCommit && writeChunk.hasBlock()) { + metrics.incContainerOpsMetrics(Type.PutBlock); + BlockData blockData = BlockData.getFromProtoBuf( + writeChunk.getBlock().getBlockData()); + // optimization for hsync when WriteChunk is in commit phase: + // + // block metadata is piggybacked in the same message. + // there will not be an additional PutBlock request. + // + // End of block will always be sent as a standalone PutBlock. + // the PutBlock piggybacked in WriteChunk is never end of block. + // + // do not do this in WRITE_DATA phase otherwise PutBlock will be out + // of order. 
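// Editor's note (assumption, not in the patch): DispatcherContext.WriteChunkStage
// is understood to cover WRITE_DATA, COMMIT_DATA and COMBINED phases, with
// isCommit() true for the latter two, so the piggybacked block metadata is
// persisted at most once per WriteChunk and only after the chunk data has been
// applied in log order.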
+ blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex()); + blockManager.putBlock(kvContainer, blockData, false); + blockDataProto = blockData.getProtoBufMessage(); + final long numBytes = blockDataProto.getSerializedSize(); + metrics.incContainerBytesStats(Type.PutBlock, numBytes); + } + // We should increment stats after writeChunk if (isWrite) { metrics.incContainerBytesStats(Type.WriteChunk, writeChunk @@ -867,7 +902,7 @@ ContainerCommandResponseProto handleWriteChunk( request); } - return getSuccessResponse(request); + return getWriteChunkResponseSuccess(request, blockDataProto); } /** @@ -910,9 +945,9 @@ ContainerCommandResponseProto handlePutSmallFile( // chunks will be committed as a part of handling putSmallFile // here. There is no need to maintain this info in openContainerBlockMap. + validateChunkChecksumData(data, chunkInfo); chunkManager .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); - validateChunkChecksumData(data, chunkInfo); chunkManager.finishWriteChunks(kvContainer, blockData); List chunks = new LinkedList<>(); @@ -1435,16 +1470,22 @@ private void triggerVolumeScanAndThrowException(Container container, private ContainerCommandResponseProto checkFaultInjector(ContainerCommandRequestProto request) { if (injector != null) { - Throwable ex = injector.getException(); - if (ex != null) { - // reset injector - injector = null; - return ContainerUtils.logAndReturnError(LOG, (StorageContainerException) ex, request); - } - try { - injector.pause(); - } catch (IOException e) { - // do nothing + synchronized (injector) { + ContainerProtos.Type type = injector.getType(); + if (request.getCmdType().equals(type) || type == null) { + Throwable ex = injector.getException(); + if (ex != null) { + if (type == null) { + injector = null; + } + return ContainerUtils.logAndReturnError(LOG, (StorageContainerException) ex, request); + } + try { + injector.pause(); + } catch (IOException e) { + // do nothing + } + } } } return null; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java index 7266904139df..c3f6ac921206 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java @@ -377,8 +377,8 @@ public static boolean isOverWriteRequested(FileChannel channel, ChunkInfo * @param chunkInfo - Chunk info * @return true if the user asks for it. 
*/ - public static boolean isOverWritePermitted(ChunkInfo chunkInfo) { - String overWrite = chunkInfo.getMetadata().get(OzoneConsts.CHUNK_OVERWRITE); + private static boolean isOverWritePermitted(ChunkInfo chunkInfo) { + String overWrite = chunkInfo.getMetadata(OzoneConsts.CHUNK_OVERWRITE); return Boolean.parseBoolean(overWrite); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java index a45055821a41..e966a0bed862 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java @@ -124,15 +124,13 @@ private static String getContainerSubDirectory(long containerId) { */ public static File getContainerDBFile(KeyValueContainerData containerData) { if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + Preconditions.checkNotNull(containerData.getVolume().getDbParentDir(), "Base Directory cannot be null"); return new File(containerData.getVolume().getDbParentDir(), OzoneConsts.CONTAINER_DB_NAME); } - return getContainerDBFile(containerData.getMetadataPath(), containerData); - } - - public static File getContainerDBFile(String baseDir, - KeyValueContainerData containerData) { - return new File(baseDir, containerData.getContainerID() + + Preconditions.checkNotNull(containerData.getMetadataPath(), "Metadata Directory cannot be null"); + return new File(containerData.getMetadataPath(), containerData.getContainerID() + OzoneConsts.DN_CONTAINER_DB); } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 55e35f5741ed..b7d5b5fa59eb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -23,7 +23,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.List; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -32,7 +31,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -65,12 +63,6 @@ private KeyValueContainerUtil() { private static final Logger LOG = LoggerFactory.getLogger( KeyValueContainerUtil.class); - /** - * - * @param containerMetaDataPath - * @throws IOException - */ - /** * creates metadata path, chunks path and metadata DB for the specified * container. 
@@ -434,46 +426,9 @@ private static void initializeUsedBytesAndBlockCount(DatanodeStore store, } public static long getBlockLength(BlockData block) throws IOException { - long blockLen = 0; - List chunkInfoList = block.getChunks(); - - for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { - ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); - blockLen += info.getLen(); - } - - return blockLen; - } - - /** - * Returns the path where data or chunks live for a given container. - * - * @param kvContainerData - KeyValueContainerData - * @return - Path to the chunks directory - */ - public static Path getDataDirectory(KeyValueContainerData kvContainerData) { - - String chunksPath = kvContainerData.getChunksPath(); - Preconditions.checkNotNull(chunksPath); - - return Paths.get(chunksPath); - } - - /** - * Container metadata directory -- here is where the RocksDB and - * .container file lives. - * - * @param kvContainerData - KeyValueContainerData - * @return Path to the metadata directory - */ - public static Path getMetadataDirectory( - KeyValueContainerData kvContainerData) { - - String metadataPath = kvContainerData.getMetadataPath(); - Preconditions.checkNotNull(metadataPath); - - return Paths.get(metadataPath); - + return block.getChunks().stream() + .mapToLong(ContainerProtos.ChunkInfo::getLen) + .sum(); } public static boolean isSameSchemaVersion(String schema, String other) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index e40434f508e6..27a40400b7a8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -42,7 +42,6 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT; @@ -55,7 +54,8 @@ */ public class BlockManagerImpl implements BlockManager { - static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class); + public static final Logger LOG = + LoggerFactory.getLogger(BlockManagerImpl.class); private ConfigurationSource config; @@ -66,6 +66,7 @@ public class BlockManagerImpl implements BlockManager { // Default Read Buffer capacity when Checksum is not present private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; + private boolean incrementalEnabled; /** * Constructs a Block Manager. 
@@ -81,6 +82,15 @@ public BlockManagerImpl(ConfigurationSource conf) { this.readMappedBufferThreshold = config.getBufferSize( ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_KEY, ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT); + incrementalEnabled = + config.getBoolean(OZONE_CHUNK_LIST_INCREMENTAL, + OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT); + if (incrementalEnabled && !VersionedDatanodeFeatures.isFinalized( + HDDSLayoutFeature.HBASE_SUPPORT)) { + LOG.warn("DataNode has not finalized upgrading to a version that " + + "supports incremental chunk list. Fallback to full chunk list"); + incrementalEnabled = false; + } } @Override @@ -93,23 +103,12 @@ public long putBlock(Container container, BlockData data, boolean endOfBlock) throws IOException { return persistPutBlock( (KeyValueContainer) container, - data, - config, - endOfBlock); + data, endOfBlock); } - public static long persistPutBlock(KeyValueContainer container, - BlockData data, ConfigurationSource config, boolean endOfBlock) + public long persistPutBlock(KeyValueContainer container, + BlockData data, boolean endOfBlock) throws IOException { - boolean incrementalEnabled = - config.getBoolean(OZONE_CHUNK_LIST_INCREMENTAL, - OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT); - if (incrementalEnabled && !VersionedDatanodeFeatures.isFinalized( - HDDSLayoutFeature.HBASE_SUPPORT)) { - throw new StorageContainerException("DataNode has not finalized " + - "upgrading to a version that supports incremental chunk list.", - UNSUPPORTED_REQUEST); - } Preconditions.checkNotNull(data, "BlockData cannot be null for put " + "operation."); Preconditions.checkState(data.getContainerID() >= 0, "Container Id " + @@ -263,7 +262,7 @@ public BlockData getBlock(Container container, BlockID blockID) long containerBCSId = containerData.getBlockCommitSequenceId(); if (containerBCSId < bcsId) { throw new StorageContainerException( - "Unable to find the block with bcsID " + bcsId + " .Container " + "Unable to find the block with bcsID " + bcsId + ". 
Container " + containerData.getContainerID() + " bcsId is " + containerBCSId + ".", UNKNOWN_BCSID); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index 1267ed786892..288a2d3e3312 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -51,8 +51,8 @@ private ChunkManagerFactory() { public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { boolean sync = - conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY, - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); + conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY, + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); boolean persist = conf.getBoolean(HDDS_CONTAINER_PERSISTDATA, HDDS_CONTAINER_PERSISTDATA_DEFAULT); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index 99b68670fadf..a87b184ccecf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -96,7 +96,7 @@ private static void checkLayoutVersion(Container container) { public String streamInit(Container container, BlockID blockID) throws StorageContainerException { checkLayoutVersion(container); - File chunkFile = getChunkFile(container, blockID, null); + final File chunkFile = getChunkFile(container, blockID); return chunkFile.getAbsolutePath(); } @@ -105,7 +105,7 @@ public StateMachine.DataChannel getStreamDataChannel( Container container, BlockID blockID, ContainerMetrics metrics) throws StorageContainerException { checkLayoutVersion(container); - File chunkFile = getChunkFile(container, blockID, null); + final File chunkFile = getChunkFile(container, blockID); return new KeyValueStreamDataChannel(chunkFile, container.getContainerData(), metrics); } @@ -137,7 +137,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); - File chunkFile = getChunkFile(container, blockID, info); + final File chunkFile = getChunkFile(container, blockID); long len = info.getLen(); long offset = info.getOffset(); @@ -188,7 +188,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, HddsVolume volume = containerData.getVolume(); - File chunkFile = getChunkFile(container, blockID, info); + final File chunkFile = getChunkFile(container, blockID); final long len = info.getLen(); long offset = info.getOffset(); @@ -213,7 +213,7 @@ public void deleteChunks(Container container, BlockData blockData) @Override public void finishWriteChunks(KeyValueContainer container, BlockData blockData) throws IOException { - File chunkFile = getChunkFile(container, blockData.getBlockID(), null); + final File chunkFile = getChunkFile(container, blockData.getBlockID()); try { files.close(chunkFile); 
verifyChunkFileExists(chunkFile); @@ -227,7 +227,7 @@ public void finishWriteChunks(KeyValueContainer container, public void finalizeWriteChunk(KeyValueContainer container, BlockID blockId) throws IOException { synchronized (container) { - File chunkFile = getChunkFile(container, blockId, null); + File chunkFile = getChunkFile(container, blockId); try { if (files.isOpen(chunkFile)) { files.close(chunkFile); @@ -247,7 +247,7 @@ private void deleteChunk(Container container, BlockID blockID, Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - File file = getChunkFile(container, blockID, info); + final File file = getChunkFile(container, blockID); // if the chunk file does not exist, it might have already been deleted. // The call might be because of reapply of transactions on datanode @@ -267,10 +267,8 @@ private void deleteChunk(Container container, BlockID blockID, LOG.info("Deleted block file: {}", file); } - private File getChunkFile(Container container, BlockID blockID, - ChunkInfo info) throws StorageContainerException { - return FILE_PER_BLOCK.getChunkFile(container.getContainerData(), blockID, - info); + private static File getChunkFile(Container container, BlockID blockID) throws StorageContainerException { + return FILE_PER_BLOCK.getChunkFile(container.getContainerData(), blockID, null); } private static void checkFullDelete(ChunkInfo info, File chunkFile) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java index 31a340f310b8..a649f573bf08 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java @@ -342,8 +342,7 @@ public void deleteChunks(Container container, BlockData blockData) private static File getChunkFile(KeyValueContainer container, BlockID blockID, ChunkInfo info) throws StorageContainerException { - return FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, - info); + return FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, info.getChunkName()); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java index 185ad9c001b9..7a08c7ef4e84 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.util.Time; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; import org.apache.ratis.thirdparty.io.netty.buffer.Unpooled; @@ -167,11 +166,7 @@ public int write(ReferenceCountedObject referenceCounted) getMetrics().incContainerOpsMetrics(getType()); assertOpen(); - final long l = Time.monotonicNow(); - int len = 
writeBuffers(referenceCounted, buffers, super::writeFileChannel); - getMetrics() - .incContainerOpsLatencies(getType(), Time.monotonicNow() - l); - return len; + return writeBuffers(referenceCounted, buffers, this::writeFileChannel); } static int writeBuffers(ReferenceCountedObject src, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java index 810495b2a740..a88f452167eb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.util.Time; import org.apache.ratis.statemachine.StateMachine; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -129,9 +130,11 @@ public void close() throws IOException { final int writeFileChannel(ByteBuffer src) throws IOException { try { + final long startTime = Time.monotonicNow(); final int writeBytes = getChannel().write(src); metrics.incContainerBytesStats(getType(), writeBytes); containerData.updateWriteStats(writeBytes, false); + metrics.incContainerOpsLatencies(getType(), Time.monotonicNow() - startTime); return writeBytes; } catch (IOException e) { checkVolume(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java index ed3142c8570d..7751dba429de 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java @@ -131,7 +131,7 @@ static int getBufferCapacityForChunkRead(ChunkInfo chunkInfo, } else { // Set buffer capacity to checksum boundary size so that each buffer // corresponds to one checksum. If checksum is NONE, then set buffer - // capacity to default (OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY = 64KB). + // capacity to default (OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY = 1MB). 
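// Editor's note (not in the patch): matching the buffer capacity to the checksum
// boundary means each buffer read from disk lines up with exactly one stored
// checksum, so verification never has to stitch data across buffers; the
// configured default (1MB per the updated comment above) only applies when the
// chunk carries no checksum.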
ChecksumData checksumData = chunkInfo.getChecksumData(); if (checksumData != null) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java index b2c62dfcbd17..cc83f453ebdf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import java.io.File; @@ -71,9 +70,6 @@ public ConfigurationSource getConfig() { public abstract DBColumnFamilyDefinition getMetadataColumnFamily(); - public abstract DBColumnFamilyDefinition - getDeletedBlocksColumnFamily(); - public DBColumnFamilyDefinition getFinalizeBlocksColumnFamily() { return null; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index c5a59da537ea..26719d7f035a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -62,13 +62,11 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { private Table blockDataTableWithIterator; - private Table deletedBlocksTable; - private Table finalizeBlocksTable; private Table finalizeBlocksTableWithIterator; - static final Logger LOG = + public static final Logger LOG = LoggerFactory.getLogger(AbstractDatanodeStore.class); private volatile DBStore store; private final AbstractDatanodeDBDefinition dbDef; @@ -161,10 +159,6 @@ public void start(ConfigurationSource config) blockDataTable = new DatanodeTable<>(blockDataTableWithIterator); checkTableStatus(blockDataTable, blockDataTable.getName()); - deletedBlocksTable = new DatanodeTable<>( - dbDef.getDeletedBlocksColumnFamily().getTable(this.store)); - checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName()); - if (dbDef.getFinalizeBlocksColumnFamily() != null) { finalizeBlocksTableWithIterator = dbDef.getFinalizeBlocksColumnFamily().getTable(this.store); @@ -217,7 +211,7 @@ public Table getLastChunkInfoTable() { @Override public Table getDeletedBlocksTable() { - return deletedBlocksTable; + throw new UnsupportedOperationException("DeletedBlocksTable is only supported in Container Schema One"); } @Override @@ -292,7 +286,7 @@ protected Table getFinalizeBlocksTableWithIterator() { return this.finalizeBlocksTableWithIterator; } - private static void checkTableStatus(Table table, String name) + protected static void checkTableStatus(Table table, String name) throws IOException { String logMessage = "Unable to get a reference to %s table. 
Cannot " + "continue."; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java index d34edb3a48a7..4f54e85da2b1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java @@ -96,7 +96,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override public DBColumnFamilyDefinition getDeletedBlocksColumnFamily() { return DELETED_BLOCKS; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java index 4d01ae781f29..d47446d49b0f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; @@ -74,15 +73,6 @@ public class DatanodeSchemaThreeDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - DELETED_BLOCKS = - new DBColumnFamilyDefinition<>( - "deleted_blocks", - String.class, - FixedLengthStringCodec.get(), - ChunkInfoList.class, - ChunkInfoList.getCodec()); - public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( @@ -116,7 +106,6 @@ public class DatanodeSchemaThreeDBDefinition COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( BLOCK_DATA, METADATA, - DELETED_BLOCKS, DELETE_TRANSACTION, FINALIZE_BLOCKS, LAST_CHUNK_INFO); @@ -140,7 +129,6 @@ public DatanodeSchemaThreeDBDefinition(String dbPath, BLOCK_DATA.setCfOptions(cfOptions); METADATA.setCfOptions(cfOptions); - DELETED_BLOCKS.setCfOptions(cfOptions); DELETE_TRANSACTION.setCfOptions(cfOptions); FINALIZE_BLOCKS.setCfOptions(cfOptions); LAST_CHUNK_INFO.setCfOptions(cfOptions); @@ -162,12 +150,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override - public DBColumnFamilyDefinition - getDeletedBlocksColumnFamily() { - return DELETED_BLOCKS; - } - @Override public DBColumnFamilyDefinition getLastChunkInfoColumnFamily() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java index 8d293aba989f..b9e7ec7bd5bf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; @@ -59,15 +58,6 @@ public class DatanodeSchemaTwoDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - DELETED_BLOCKS = - new DBColumnFamilyDefinition<>( - "deleted_blocks", - String.class, - StringCodec.get(), - ChunkInfoList.class, - ChunkInfoList.getCodec()); - public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( @@ -104,7 +94,6 @@ public DatanodeSchemaTwoDBDefinition(String dbPath, COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( BLOCK_DATA, METADATA, - DELETED_BLOCKS, DELETE_TRANSACTION, FINALIZE_BLOCKS, LAST_CHUNK_INFO); @@ -125,12 +114,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override - public DBColumnFamilyDefinition - getDeletedBlocksColumnFamily() { - return DELETED_BLOCKS; - } - @Override public DBColumnFamilyDefinition getLastChunkInfoColumnFamily() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java index 4b514c04e44e..f5eb1a3d8ec5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java @@ -28,6 +28,9 @@ * places all data in the default column family. */ public class DatanodeStoreSchemaOneImpl extends AbstractDatanodeStore { + + private Table deletedBlocksTable; + /** * Constructs the metadata store and starts the DB Services. * @@ -38,12 +41,15 @@ public DatanodeStoreSchemaOneImpl(ConfigurationSource config, String dbPath, boolean openReadOnly) throws IOException { super(config, new DatanodeSchemaOneDBDefinition(dbPath, config), openReadOnly); + deletedBlocksTable = new DatanodeTable<>( + ((DatanodeSchemaOneDBDefinition) getDbDef()).getDeletedBlocksColumnFamily().getTable(getStore())); + checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName()); } @Override public Table getDeletedBlocksTable() { // Return a wrapper around the deleted blocks table to handle prefixes // when all data is stored in a single table. 
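// Editor's note (not in the patch): since AbstractDatanodeStore no longer creates a
// deleted-blocks table and the schema two/three definitions drop that column family
// (see the hunks above), schema one now opens its own handle in its constructor, so
// a missing table is caught at startup via checkTableStatus rather than on first
// access here.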
- return new SchemaOneDeletedBlocksTable(super.getDeletedBlocksTable()); + return new SchemaOneDeletedBlocksTable(deletedBlocksTable); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java index 25479a7a9c14..1f5c442601b8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.container.metadata; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; @@ -30,7 +31,6 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; -import org.bouncycastle.util.Strings; import org.rocksdb.LiveFileMetaData; import java.io.File; @@ -106,7 +106,6 @@ public void removeKVContainerData(long containerID) throws IOException { try (BatchOperation batch = getBatchHandler().initBatchOperation()) { getMetadataTable().deleteBatchWithPrefix(batch, prefix); getBlockDataTable().deleteBatchWithPrefix(batch, prefix); - getDeletedBlocksTable().deleteBatchWithPrefix(batch, prefix); getDeleteTransactionTable().deleteBatchWithPrefix(batch, prefix); getBatchHandler().commitBatchOperation(batch); } @@ -119,8 +118,6 @@ public void dumpKVContainerData(long containerID, File dumpDir) getTableDumpFile(getMetadataTable(), dumpDir), prefix); getBlockDataTable().dumpToFileWithPrefix( getTableDumpFile(getBlockDataTable(), dumpDir), prefix); - getDeletedBlocksTable().dumpToFileWithPrefix( - getTableDumpFile(getDeletedBlocksTable(), dumpDir), prefix); getDeleteTransactionTable().dumpToFileWithPrefix( getTableDumpFile(getDeleteTransactionTable(), dumpDir), prefix); @@ -132,8 +129,6 @@ public void loadKVContainerData(File dumpDir) getTableDumpFile(getMetadataTable(), dumpDir)); getBlockDataTable().loadFromFile( getTableDumpFile(getBlockDataTable(), dumpDir)); - getDeletedBlocksTable().loadFromFile( - getTableDumpFile(getDeletedBlocksTable(), dumpDir)); getDeleteTransactionTable().loadFromFile( getTableDumpFile(getDeleteTransactionTable(), dumpDir)); } @@ -157,13 +152,12 @@ public void compactionIfNeeded() throws Exception { int numThreshold = df.getAutoCompactionSmallSstFileNum(); long sizeThreshold = df.getAutoCompactionSmallSstFileSize(); Map>> stat = new HashMap<>(); - Map> map; for (LiveFileMetaData file: liveFileMetaDataList) { if (file.size() >= sizeThreshold) { continue; } - String cf = Strings.fromByteArray(file.columnFamilyName()); + String cf = StringUtils.bytes2String(file.columnFamilyName()); stat.computeIfAbsent(cf, k -> new HashMap<>()); stat.computeIfPresent(cf, (k, v) -> { v.computeIfAbsent(file.level(), l -> new LinkedList<>()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java 
index f2b879706b7b..5941cc6cf89c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java @@ -71,10 +71,10 @@ public class ContainerScannerConfiguration { @Config(key = "enabled", type = ConfigType.BOOLEAN, - defaultValue = "false", + defaultValue = "true", tags = {ConfigTag.STORAGE}, description = "Config parameter to enable all container scanners.") - private boolean enabled = false; + private boolean enabled = true; @Config(key = "dev.data.scan.enabled", type = ConfigType.BOOLEAN, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 1929c16089b0..f20094079c9e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -95,18 +95,17 @@ public void importContainer(long containerID, Path tarFilePath, throws IOException { if (!importContainerProgress.add(containerID)) { deleteFileQuietely(tarFilePath); - LOG.warn("Container import in progress with container Id {}", - containerID); - throw new StorageContainerException("Container " + - "import in progress with container Id " + containerID, + String log = "Container import in progress with container Id " + containerID; + LOG.warn(log); + throw new StorageContainerException(log, ContainerProtos.Result.CONTAINER_EXISTS); } try { if (containerSet.getContainer(containerID) != null) { - LOG.warn("Container already exists with container Id {}", containerID); - throw new StorageContainerException("Container already exists " + - "with container Id " + containerID, + String log = "Container already exists with container Id " + containerID; + LOG.warn(log); + throw new StorageContainerException(log, ContainerProtos.Result.CONTAINER_EXISTS); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java index 20c36b4d1fcf..6bc237207b37 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java @@ -19,7 +19,10 @@ package org.apache.hadoop.ozone.container.replication; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; +import java.util.HashSet; +import java.util.Set; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerResponseProto; @@ -28,11 +31,18 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; +import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite; +import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; +import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler; 
+import org.apache.ratis.thirdparty.io.grpc.ServerServiceDefinition; import org.apache.ratis.thirdparty.io.grpc.stub.CallStreamObserver; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc.getDownloadMethod; +import static org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc.getUploadMethod; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.fromProto; /** @@ -49,10 +59,79 @@ public class GrpcReplicationService extends private final ContainerReplicationSource source; private final ContainerImporter importer; + private final boolean zeroCopyEnabled; + + private final ZeroCopyMessageMarshaller + sendContainerZeroCopyMessageMarshaller; + + private final ZeroCopyMessageMarshaller + copyContainerZeroCopyMessageMarshaller; + public GrpcReplicationService(ContainerReplicationSource source, - ContainerImporter importer) { + ContainerImporter importer, boolean zeroCopyEnabled) { this.source = source; this.importer = importer; + this.zeroCopyEnabled = zeroCopyEnabled; + + if (zeroCopyEnabled) { + sendContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + SendContainerRequest.getDefaultInstance()); + copyContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + CopyContainerRequestProto.getDefaultInstance()); + } else { + sendContainerZeroCopyMessageMarshaller = null; + copyContainerZeroCopyMessageMarshaller = null; + } + } + + public ServerServiceDefinition bindServiceWithZeroCopy() { + ServerServiceDefinition orig = super.bindService(); + if (!zeroCopyEnabled) { + LOG.info("Zerocopy is not enabled."); + return orig; + } + + Set methodNames = new HashSet<>(); + ServerServiceDefinition.Builder builder = + ServerServiceDefinition.builder(orig.getServiceDescriptor().getName()); + + // Add `upload` method with zerocopy marshaller. + MethodDescriptor uploadMethod = + getUploadMethod(); + addZeroCopyMethod(orig, builder, uploadMethod, + sendContainerZeroCopyMessageMarshaller); + methodNames.add(uploadMethod.getFullMethodName()); + + // Add `download` method with zerocopy marshaller. + MethodDescriptor + downloadMethod = getDownloadMethod(); + addZeroCopyMethod(orig, builder, downloadMethod, + copyContainerZeroCopyMessageMarshaller); + methodNames.add(downloadMethod.getFullMethodName()); + + // Add other methods as is. 
+ orig.getMethods().stream().filter( + x -> !methodNames.contains(x.getMethodDescriptor().getFullMethodName()) + ).forEach( + builder::addMethod + ); + + return builder.build(); + } + + private static void addZeroCopyMethod( + ServerServiceDefinition orig, + ServerServiceDefinition.Builder newServiceBuilder, + MethodDescriptor origMethod, + ZeroCopyMessageMarshaller zeroCopyMarshaller) { + MethodDescriptor newMethod = origMethod.toBuilder() + .setRequestMarshaller(zeroCopyMarshaller) + .build(); + @SuppressWarnings("unchecked") + ServerCallHandler serverCallHandler = + (ServerCallHandler) orig.getMethod( + newMethod.getFullMethodName()).getServerCallHandler(); + newServiceBuilder.addMethod(newMethod, serverCallHandler); } @Override @@ -76,13 +155,21 @@ public void download(CopyContainerRequestProto request, } finally { // output may have already been closed, ignore such errors IOUtils.cleanupWithLogger(LOG, outputStream); + + if (copyContainerZeroCopyMessageMarshaller != null) { + InputStream popStream = + copyContainerZeroCopyMessageMarshaller.popStream(request); + if (popStream != null) { + IOUtils.cleanupWithLogger(LOG, popStream); + } + } } } @Override public StreamObserver upload( StreamObserver responseObserver) { - - return new SendContainerRequestHandler(importer, responseObserver); + return new SendContainerRequestHandler(importer, responseObserver, + sendContainerZeroCopyMessageMarshaller); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java index fa3763d88067..7becbe752189 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java @@ -27,7 +27,6 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.Time; /** @@ -98,38 +97,31 @@ public void close() throws Exception { DefaultMetricsSystem.instance().unregisterSource(metricsName()); } - @VisibleForTesting - public MutableCounterLong getSuccess() { + MutableCounterLong getSuccess() { return success; } - @VisibleForTesting - public MutableGaugeLong getSuccessTime() { + MutableGaugeLong getSuccessTime() { return successTime; } - @VisibleForTesting - public MutableGaugeLong getFailureTime() { + MutableGaugeLong getFailureTime() { return failureTime; } - @VisibleForTesting - public MutableCounterLong getFailure() { + MutableCounterLong getFailure() { return failure; } - @VisibleForTesting - public MutableGaugeLong getQueueTime() { + MutableGaugeLong getQueueTime() { return queueTime; } - @VisibleForTesting - public MutableGaugeLong getTransferredBytes() { + MutableGaugeLong getTransferredBytes() { return transferredBytes; } - @VisibleForTesting - public MutableGaugeLong getFailureBytes() { + MutableGaugeLong getFailureBytes() { return failureBytes; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java index d2407a61d0b5..f72ca2a6881d 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java @@ -99,16 +99,18 @@ public ReplicationServer(ContainerController controller, new LinkedBlockingQueue<>(replicationQueueLimit), threadFactory); - init(); + init(replicationConfig.isZeroCopyEnable()); } - public void init() { + public void init(boolean enableZeroCopy) { + GrpcReplicationService grpcReplicationService = new GrpcReplicationService( + new OnDemandContainerReplicationSource(controller), importer, + enableZeroCopy); NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port) .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - .addService(ServerInterceptors.intercept(new GrpcReplicationService( - new OnDemandContainerReplicationSource(controller), - importer - ), new GrpcServerInterceptor())) + .addService(ServerInterceptors.intercept( + grpcReplicationService.bindServiceWithZeroCopy(), + new GrpcServerInterceptor())) .executor(executor); if (secConf.isSecurityEnabled() && secConf.isGrpcTlsEnabled()) { @@ -203,6 +205,11 @@ public static final class ReplicationConfig { static final String REPLICATION_OUTOFSERVICE_FACTOR_KEY = PREFIX + "." + OUTOFSERVICE_FACTOR_KEY; + public static final String ZEROCOPY_ENABLE_KEY = "zerocopy.enabled"; + private static final boolean ZEROCOPY_ENABLE_DEFAULT = true; + private static final String ZEROCOPY_ENABLE_DEFAULT_VALUE = + "true"; + /** * The maximum number of replication commands a single datanode can execute * simultaneously. @@ -244,6 +251,15 @@ public static final class ReplicationConfig { ) private double outOfServiceFactor = OUTOFSERVICE_FACTOR_DEFAULT; + @Config(key = ZEROCOPY_ENABLE_KEY, + type = ConfigType.BOOLEAN, + defaultValue = ZEROCOPY_ENABLE_DEFAULT_VALUE, + tags = {DATANODE, SCM}, + description = "Specify if zero-copy should be enabled for " + + "replication protocol." 
+ ) + private boolean zeroCopyEnable = ZEROCOPY_ENABLE_DEFAULT; + public double getOutOfServiceFactor() { return outOfServiceFactor; } @@ -277,6 +293,14 @@ public void setReplicationQueueLimit(int limit) { this.replicationQueueLimit = limit; } + public boolean isZeroCopyEnable() { + return zeroCopyEnable; + } + + public void setZeroCopyEnable(boolean zeroCopyEnable) { + this.zeroCopyEnable = zeroCopyEnable; + } + @PostConstruct public void validate() { if (replicationMaxStreams < 1) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index ee51463309b8..5ceea125e814 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -106,7 +106,6 @@ public static class Builder { private Clock clock; private IntConsumer executorThreadUpdater = threadCount -> { }; - private String threadNamePrefix; public Builder clock(Clock newClock) { clock = newClock; @@ -138,11 +137,6 @@ public Builder executorThreadUpdater(IntConsumer newUpdater) { return this; } - public Builder threadNamePrefix(String threadPrefix) { - this.threadNamePrefix = threadPrefix; - return this; - } - public ReplicationSupervisor build() { if (replicationConfig == null || datanodeConfig == null) { ConfigurationSource conf = new OzoneConfiguration(); @@ -162,6 +156,7 @@ public ReplicationSupervisor build() { if (executor == null) { LOG.info("Initializing replication supervisor with thread count = {}", replicationConfig.getReplicationMaxStreams()); + String threadNamePrefix = context != null ? 
context.getThreadNamePrefix() : ""; ThreadFactory threadFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat(threadNamePrefix + "ContainerReplicationThread-%d") diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java index 6bcd46ba0a7a..506a96fe0514 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java @@ -24,11 +24,13 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -53,12 +55,15 @@ class SendContainerRequestHandler private HddsVolume volume; private Path path; private CopyContainerCompression compression; + private final ZeroCopyMessageMarshaller marshaller; SendContainerRequestHandler( ContainerImporter importer, - StreamObserver responseObserver) { + StreamObserver responseObserver, + ZeroCopyMessageMarshaller marshaller) { this.importer = importer; this.responseObserver = responseObserver; + this.marshaller = marshaller; } @Override @@ -98,6 +103,13 @@ public void onNext(SendContainerRequest req) { nextOffset += length; } catch (Throwable t) { onError(t); + } finally { + if (marshaller != null) { + InputStream popStream = marshaller.popStream(req); + if (popStream != null) { + IOUtils.cleanupWithLogger(LOG, popStream); + } + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java index f12b976c4d31..f6633cb9d370 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.ozone.protocol.commands; -import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.HddsIdFactory; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -41,12 +41,12 @@ public class ReconstructECContainersCommand private final long containerID; private final List sources; private final List targetDatanodes; - private final byte[] missingContainerIndexes; + private final ByteString missingContainerIndexes; private final ECReplicationConfig ecReplicationConfig; public ReconstructECContainersCommand(long containerID, List sources, - List targetDatanodes, byte[] 
missingContainerIndexes, + List targetDatanodes, ByteString missingContainerIndexes, ECReplicationConfig ecReplicationConfig) { this(containerID, sources, targetDatanodes, missingContainerIndexes, ecReplicationConfig, HddsIdFactory.getLongId()); @@ -54,16 +54,15 @@ public ReconstructECContainersCommand(long containerID, public ReconstructECContainersCommand(long containerID, List sourceDatanodes, - List targetDatanodes, byte[] missingContainerIndexes, + List targetDatanodes, ByteString missingContainerIndexes, ECReplicationConfig ecReplicationConfig, long id) { super(id); this.containerID = containerID; this.sources = sourceDatanodes; this.targetDatanodes = targetDatanodes; - this.missingContainerIndexes = - Arrays.copyOf(missingContainerIndexes, missingContainerIndexes.length); + this.missingContainerIndexes = missingContainerIndexes; this.ecReplicationConfig = ecReplicationConfig; - if (targetDatanodes.size() != missingContainerIndexes.length) { + if (targetDatanodes.size() != missingContainerIndexes.size()) { throw new IllegalArgumentException("Number of target datanodes and " + "container indexes should be same"); } @@ -85,15 +84,11 @@ public ReconstructECContainersCommandProto getProto() { for (DatanodeDetails dd : targetDatanodes) { builder.addTargets(dd.getProtoBufMessage()); } - builder.setMissingContainerIndexes(getByteString(missingContainerIndexes)); + builder.setMissingContainerIndexes(missingContainerIndexes); builder.setEcReplicationConfig(ecReplicationConfig.toProto()); return builder.build(); } - public static ByteString getByteString(byte[] bytes) { - return (bytes.length == 0) ? ByteString.EMPTY : ByteString.copyFrom(bytes); - } - public static ReconstructECContainersCommand getFromProtobuf( ReconstructECContainersCommandProto protoMessage) { Preconditions.checkNotNull(protoMessage); @@ -108,7 +103,7 @@ public static ReconstructECContainersCommand getFromProtobuf( return new ReconstructECContainersCommand(protoMessage.getContainerID(), srcDatanodeDetails, targetDatanodeDetails, - protoMessage.getMissingContainerIndexes().toByteArray(), + protoMessage.getMissingContainerIndexes(), new ECReplicationConfig(protoMessage.getEcReplicationConfig()), protoMessage.getCmdId()); } @@ -125,9 +120,8 @@ public List getTargetDatanodes() { return targetDatanodes; } - public byte[] getMissingContainerIndexes() { - return Arrays - .copyOf(missingContainerIndexes, missingContainerIndexes.length); + public ByteString getMissingContainerIndexes() { + return missingContainerIndexes; } public ECReplicationConfig getEcReplicationConfig() { @@ -146,7 +140,7 @@ public String toString() { .collect(Collectors.joining(", "))).append("]") .append(", targets: ").append(getTargetDatanodes()) .append(", missingIndexes: ").append( - Arrays.toString(missingContainerIndexes)); + StringUtils.bytes2String(missingContainerIndexes.asReadOnlyByteBuffer())); return sb.toString(); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 33bc4a851664..c63f82025e09 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -344,7 +344,7 @@ public static ContainerController getEmptyContainerController() { public static XceiverServerRatis 
newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); return XceiverServerRatis.newXceiverServerRatis(dn, conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index 7917a4ce55cd..21775245efb2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -155,8 +155,8 @@ public static HddsProtos.ReplicationFactor getReplicationFactor( private static boolean isUseRatis(ConfigurationSource c) { return c.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 5738f5c1106e..e1e1ee9172a8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -82,12 +82,12 @@ void setUp() throws Exception { conf = SCMTestUtils.getConf(testRoot); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, TimeUnit.MILLISECONDS); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); serverAddresses = new ArrayList<>(); scmServers = new ArrayList<>(); mockServers = new ArrayList<>(); @@ -200,7 +200,7 @@ public void testDatanodeStateContext() throws IOException, DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); try (DatanodeStateMachine stateMachine = @@ -327,7 +327,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); try (DatanodeStateMachine stateMachine = diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 95df6c647f8b..1cbd6ee4706d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -20,11 +20,13 @@ import com.google.common.collect.Maps; import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; +import org.apache.hadoop.hdds.fs.MockSpaceUsageSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.SpaceUsageSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -39,6 +41,8 @@ import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChecksumData; +import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; @@ -46,10 +50,12 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.Op; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; @@ -68,6 +74,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.time.Duration; import java.util.Collections; import java.util.HashMap; @@ -78,7 +85,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory; -import static org.apache.hadoop.hdds.fs.MockSpaceUsageSource.fixed; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getContainerCommandResponse; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; @@ -99,6 +105,9 @@ * Test-cases to verify the functionality of HddsDispatcher. 
*/ public class TestHddsDispatcher { + @TempDir + private Path tempDir; + private static final Logger LOG = LoggerFactory.getLogger( TestHddsDispatcher.class); @TempDir @@ -129,6 +138,8 @@ public void testContainerCloseActionWhenFull( (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); @@ -164,6 +175,72 @@ public void testContainerCloseActionWhenFull( } } + @Test + public void testSmallFileChecksum() throws IOException { + String testDirPath = testDir.getPath(); + try { + UUID scmId = UUID.randomUUID(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + dnConf.setChunkDataValidationCheck(true); + conf.setFromObject(dnConf); + DatanodeDetails dd = randomDatanodeDetails(); + HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); + + ContainerCommandResponseProto smallFileResponse = + hddsDispatcher.dispatch(newPutSmallFile(1L, 1L), null); + + assertEquals(ContainerProtos.Result.SUCCESS, smallFileResponse.getResult()); + } finally { + ContainerMetrics.remove(); + } + } + + @Test + public void testWriteChunkChecksum() throws IOException { + String testDirPath = testDir.getPath(); + try { + UUID scmId = UUID.randomUUID(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + dnConf.setChunkDataValidationCheck(true); + conf.setFromObject(dnConf); + DatanodeDetails dd = randomDatanodeDetails(); + HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); + //Send a few WriteChunkRequests + ContainerCommandResponseProto response; + ContainerCommandRequestProto writeChunkRequest0 = getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 0); + hddsDispatcher.dispatch(writeChunkRequest0, null); + hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 1), null); + response = hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 2), null); + + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + // Send Read Chunk request for written chunk. 
+ response = + hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), null); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + + ByteString responseData = BufferUtils.concatByteStrings( + response.getReadChunk().getDataBuffers().getBuffersList()); + assertEquals(writeChunkRequest0.getWriteChunk().getData(), + responseData); + + // Test checksum on Read: + final DispatcherContext context = DispatcherContext + .newBuilder(DispatcherContext.Op.READ_STATE_MACHINE_DATA) + .build(); + response = + hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), context); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + } finally { + ContainerMetrics.remove(); + } + } + @ContainerLayoutTestInfo.ContainerTest public void testContainerCloseActionWhenVolumeFull( ContainerLayoutVersion layoutVersion) throws Exception { @@ -178,7 +255,8 @@ public void testContainerCloseActionWhenVolumeFull( .conf(conf).usageCheckFactory(MockSpaceUsageCheckFactory.NONE); // state of cluster : available (140) > 100 ,datanode volume // utilisation threshold not yet reached. container creates are successful. - SpaceUsageSource spaceUsage = fixed(500, 140, 360); + AtomicLong usedSpace = new AtomicLong(360); + SpaceUsageSource spaceUsage = MockSpaceUsageSource.of(500, usedSpace); SpaceUsageCheckFactory factory = MockSpaceUsageCheckFactory.of( spaceUsage, Duration.ZERO, inMemory(new AtomicLong(0))); @@ -196,6 +274,8 @@ public void testContainerCloseActionWhenVolumeFull( 50, UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); @@ -212,6 +292,7 @@ public void testContainerCloseActionWhenVolumeFull( hddsDispatcher.setClusterId(scmId.toString()); containerData.getVolume().getVolumeInfo() .ifPresent(volumeInfo -> volumeInfo.incrementUsedSpace(50)); + usedSpace.addAndGet(50); ContainerCommandResponseProto response = hddsDispatcher .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null); assertEquals(ContainerProtos.Result.SUCCESS, @@ -512,6 +593,84 @@ private ContainerCommandRequestProto getWriteChunkRequest( .build(); } + static ChecksumData checksum(ByteString data) { + try { + return new Checksum(ContainerProtos.ChecksumType.CRC32, 256) + .computeChecksum(data.asReadOnlyByteBuffer()); + } catch (OzoneChecksumException e) { + throw new IllegalStateException(e); + } + } + + private ContainerCommandRequestProto getWriteChunkRequest0( + String datanodeId, Long containerId, Long localId, int chunkNum) { + final int lenOfBytes = 32; + ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + + ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo + .newBuilder() + .setChunkName( + DigestUtils.md5Hex("dummy-key") + "_stream_" + + containerId + "_chunk_" + localId) + .setOffset((long) chunkNum * lenOfBytes) + .setLen(lenOfBytes) + .setChecksumData(checksum(chunkData).getProtoBufMessage()) + .build(); + + WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto + .newBuilder() + .setBlockID(new BlockID(containerId, localId) + .getDatanodeBlockIDProtobuf()) + .setChunkData(chunk) + .setData(chunkData); + + return ContainerCommandRequestProto + .newBuilder() + .setContainerID(containerId) + 
.setCmdType(ContainerProtos.Type.WriteChunk) + .setDatanodeUuid(datanodeId) + .setWriteChunk(writeChunkRequest) + .build(); + } + + static ContainerCommandRequestProto newPutSmallFile(Long containerId, Long localId) { + ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + return newPutSmallFile(new BlockID(containerId, localId), chunkData); + } + + static ContainerCommandRequestProto newPutSmallFile( + BlockID blockID, ByteString data) { + final ContainerProtos.BlockData.Builder blockData + = ContainerProtos.BlockData.newBuilder() + .setBlockID(blockID.getDatanodeBlockIDProtobuf()); + final ContainerProtos.PutBlockRequestProto.Builder putBlockRequest + = ContainerProtos.PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + final ContainerProtos.KeyValue keyValue = ContainerProtos.KeyValue.newBuilder() + .setKey("OverWriteRequested") + .setValue("true") + .build(); + final ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(blockID.getLocalID() + "_chunk") + .setOffset(0) + .setLen(data.size()) + .addMetadata(keyValue) + .setChecksumData(checksum(data).getProtoBufMessage()) + .build(); + final ContainerProtos.PutSmallFileRequestProto putSmallFileRequest + = ContainerProtos.PutSmallFileRequestProto.newBuilder() + .setChunkInfo(chunk) + .setBlock(putBlockRequest) + .setData(data) + .build(); + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.PutSmallFile) + .setContainerID(blockID.getContainerID()) + .setDatanodeUuid(UUID.randomUUID().toString()) + .setPutSmallFile(putSmallFileRequest) + .build(); + } + /** * Creates container read chunk request using input container write chunk * request. @@ -554,12 +713,12 @@ private void verify() { @Override public void verify(ContainerCommandRequestProtoOrBuilder cmd, - String user, String encodedToken) { + String encodedToken) { verify(); } @Override - public void verify(String user, Token token, + public void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) { verify(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java index 565853c22dde..657afc38874a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java @@ -178,7 +178,7 @@ public void isCreatedWitDefaultValues() { public void testConf() throws Exception { final OzoneConfiguration conf = new OzoneConfiguration(); final String dir = "dummy/dir"; - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final DatanodeRatisServerConfig ratisConf = conf.getObject( DatanodeRatisServerConfig.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index 90ed4ca4cc91..dcabad46ac54 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -333,6 +333,45 @@ public void testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull() } } + @ContainerTestVersionInfo.ContainerTest + public void testDuplicateDeleteBlocksCommand( + ContainerTestVersionInfo versionInfo) throws Exception { + prepareTest(versionInfo); + assertThat(containerSet.containerCount()).isGreaterThan(0); + Container container = containerSet.getContainerIterator(volume1).next(); + DeletedBlocksTransaction transaction = createDeletedBlocksTransaction(100, + container.getContainerData().getContainerID()); + + List results1 = + handler.executeCmdWithRetry(Arrays.asList(transaction)); + List results2 = + handler.executeCmdWithRetry(Arrays.asList(transaction)); + + transaction = createDeletedBlocksTransaction(99, + container.getContainerData().getContainerID()); + List results3 = + handler.executeCmdWithRetry(Arrays.asList(transaction)); + + String schemaVersionOrDefault = ((KeyValueContainerData) + container.getContainerData()).getSupportedSchemaVersionOrDefault(); + verify(handler.getSchemaHandlers().get(schemaVersionOrDefault), + times(3)).handle(any(), any()); + // submitTasks will be executed three times + verify(handler, times(3)).submitTasks(any()); + + assertEquals(1, results1.size()); + assertTrue(results1.get(0).getSuccess()); + assertEquals(1, results2.size()); + assertTrue(results2.get(0).getSuccess()); + assertEquals(1, results3.size()); + assertTrue(results3.get(0).getSuccess()); + assertEquals(0, + blockDeleteMetrics.getTotalLockTimeoutTransactionCount()); + // Duplicate cmd content will not be persisted. 
+ assertEquals(2, + ((KeyValueContainerData) container.getContainerData()).getNumPendingDeletionBlocks()); + } + private DeletedBlocksTransaction createDeletedBlocksTransaction(long txID, long containerID) { return DeletedBlocksTransaction.newBuilder() @@ -347,7 +386,11 @@ private static class TestSchemaHandler implements SchemaHandler { @Override public void handle(KeyValueContainerData containerData, DeletedBlocksTransaction tx) throws IOException { - // doNoting just for Test + if (DeleteBlocksCommandHandler.isDuplicateTransaction(containerData.getContainerID(), containerData, tx, null)) { + return; + } + containerData.incrPendingDeletionBlocks(tx.getLocalIDCount()); + containerData.updateDeleteTransactionId(tx.getTxID()); } } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java index 09fa8a991770..7d8b94e57d35 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java @@ -37,6 +37,7 @@ import java.util.OptionalLong; import java.util.UUID; +import com.google.protobuf.Proto2Utils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -79,7 +80,8 @@ public void handlesReconstructContainerCommand() throws Exception { targetDns.add(MockDatanodeDetails.randomDatanodeDetails()); targetDns.add(MockDatanodeDetails.randomDatanodeDetails()); ReconstructECContainersCommand cmd = new ReconstructECContainersCommand( - 1, emptyList(), targetDns, new byte[]{2, 5}, + 1, emptyList(), targetDns, + Proto2Utils.unsafeByteString(new byte[]{2, 5}), new ECReplicationConfig(3, 2)); when(scm.sendHeartbeat(any())) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java index d05c127838f1..387997db736d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.volume.DbVolume; @@ -43,7 +44,13 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrowsExactly; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mockStatic; + +import org.mockito.MockedStatic; +import org.mockito.Mockito; + /** * Test for {@link HddsVolumeUtil}. 
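The ReconstructECContainersCommand hunks above replace the defensive byte[] copies with a protobuf ByteString, and the heartbeat test now builds the missing indexes via Proto2Utils.unsafeByteString. Below is a hedged sketch of constructing the command under the new signature; it uses ByteString.copyFrom instead of the unsafe variant, reuses MockDatanodeDetails as the test does, and the import path for MockDatanodeDetails is an assumption rather than something taken from this patch.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import com.google.protobuf.ByteString;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;  // assumed location of the test helper
import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand;

public final class ReconstructCommandSketch {
  private ReconstructCommandSketch() { }

  public static void main(String[] args) {
    // One target datanode is required per missing replica index.
    List<DatanodeDetails> targets = Arrays.asList(
        MockDatanodeDetails.randomDatanodeDetails(),
        MockDatanodeDetails.randomDatanodeDetails());

    // Replica indexes 2 and 5 are missing; they now travel as a ByteString,
    // so no byte[] copy is made when the command is built or read back.
    ByteString missingIndexes = ByteString.copyFrom(new byte[] {2, 5});

    ReconstructECContainersCommand cmd = new ReconstructECContainersCommand(
        1L, Collections.emptyList(), targets, missingIndexes,
        new ECReplicationConfig(3, 2));

    // getMissingContainerIndexes() returns the ByteString itself.
    System.out.println("missing indexes: "
        + cmd.getMissingContainerIndexes().size());
  }
}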
@@ -95,6 +102,34 @@ public void teardown() { dbVolumeSet.shutdown(); } + @Test + public void testLoadHDDVolumeWithInitDBException() + throws Exception { + // Create db instances for all HDDsVolumes. + for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList( + hddsVolumeSet.getVolumesList())) { + hddsVolume.format(clusterId); + hddsVolume.createWorkingDir(clusterId, null); + } + + try (MockedStatic mocked = mockStatic(HddsVolumeUtil.class, Mockito.CALLS_REAL_METHODS)) { + // Simulating the init DB Exception + mocked.when(() -> HddsVolumeUtil.initPerDiskDBStore(Mockito.anyString(), Mockito.any(), Mockito.anyBoolean())) + .thenThrow(new IOException("Mocked Exception")); + + reinitVolumes(); + for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList( + hddsVolumeSet.getVolumesList())) { + assertThrowsExactly(IOException.class, () -> hddsVolume.loadDbStore(true)); + // If the Volume init DB is abnormal, the Volume should be recognized as a failed Volume + assertEquals(VolumeCheckResult.FAILED, hddsVolume.check(false)); + assertTrue(hddsVolume.isDbLoadFailure()); + assertFalse(hddsVolume.isDbLoaded()); + } + } + + } + @Test public void testLoadAllHddsVolumeDbStoreWithoutDbVolumes() throws IOException { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java index 4718df3ae3f4..1eba25c35712 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java @@ -38,6 +38,7 @@ import java.util.Map; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -60,6 +61,9 @@ public void setup() throws Exception { String volume2 = baseDir + "disk2"; String volume3 = baseDir + "disk3"; policy = new CapacityVolumeChoosingPolicy(); + // Use the exact capacity and availability specified in this test. Do not reserve space to prevent volumes from + // filling up. 
+ CONF.setFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, 0); SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100); SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java index 3859cd47c9b9..46b8cc6772e8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java @@ -59,7 +59,7 @@ public class TestPeriodicVolumeChecker { public void setup() throws IOException { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java index c5d127446bfc..cd9beab4b797 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java @@ -25,6 +25,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import java.io.File; +import java.nio.file.Files; import java.nio.file.Path; import java.util.UUID; @@ -52,6 +54,27 @@ public void setup() throws Exception { .usageCheckFactory(MockSpaceUsageCheckFactory.NONE); } + @Test + public void testDefaultConfig() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); + float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, + HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); + assertEquals(percentage, HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); + + // Gets the total capacity reported by Ozone, which may be limited to less than the volume's real capacity by the + // DU reserved configurations. + long volumeCapacity = hddsVolume.getCapacity(); + VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting(); + + // Gets the actual total capacity without accounting for DU reserved space configurations. + long totalCapacity = usage.realUsage().getCapacity(); + long reservedCapacity = usage.getReservedBytes(); + + assertEquals(getExpectedDefaultReserved(hddsVolume), reservedCapacity); + assertEquals(totalCapacity - reservedCapacity, volumeCapacity); + } + /** * Test reserved capacity with respect to the percentage of actual capacity. 
* @throws Exception @@ -66,20 +89,15 @@ public void testVolumeCapacityAfterReserve() throws Exception { HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); long volumeCapacity = hddsVolume.getCapacity(); - //Gets the actual total capacity - long totalCapacity = hddsVolume.getVolumeInfo().get() - .getUsageForTesting().getCapacity(); - long reservedCapacity = hddsVolume.getVolumeInfo().get() - .getReservedInBytes(); - //Volume Capacity with Reserved - long volumeCapacityReserved = totalCapacity - reservedCapacity; + VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting(); - long reservedFromVolume = hddsVolume.getVolumeInfo().get() - .getReservedInBytes(); + //Gets the actual total capacity + long totalCapacity = usage.realUsage().getCapacity(); + long reservedCapacity = usage.getReservedBytes(); long reservedCalculated = (long) Math.ceil(totalCapacity * percentage); - assertEquals(reservedFromVolume, reservedCalculated); - assertEquals(volumeCapacity, volumeCapacityReserved); + assertEquals(reservedCalculated, reservedCapacity); + assertEquals(totalCapacity - reservedCapacity, volumeCapacity); } /** @@ -97,17 +115,7 @@ public void testReservedWhenBothConfigSet() throws Exception { long reservedFromVolume = hddsVolume.getVolumeInfo().get() .getReservedInBytes(); - assertEquals(reservedFromVolume, 500); - } - - @Test - public void testReservedToZeroWhenBothConfigNotSet() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); - - long reservedFromVolume = hddsVolume.getVolumeInfo().get() - .getReservedInBytes(); - assertEquals(reservedFromVolume, 0); + assertEquals(500, reservedFromVolume); } @Test @@ -119,16 +127,15 @@ public void testFallbackToPercentConfig() throws Exception { temp.toString() + ":500B"); HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); - long reservedFromVolume = hddsVolume.getVolumeInfo().get() - .getReservedInBytes(); - assertNotEquals(reservedFromVolume, 0); + VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting(); + long reservedFromVolume = usage.getReservedBytes(); + assertNotEquals(0, reservedFromVolume); - long totalCapacity = hddsVolume.getVolumeInfo().get() - .getUsageForTesting().getCapacity(); + long totalCapacity = usage.realUsage().getCapacity(); float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); long reservedCalculated = (long) Math.ceil(totalCapacity * percentage); - assertEquals(reservedFromVolume, reservedCalculated); + assertEquals(reservedCalculated, reservedFromVolume); } @Test @@ -142,7 +149,7 @@ public void testInvalidConfig() throws Exception { long reservedFromVolume1 = hddsVolume1.getVolumeInfo().get() .getReservedInBytes(); - assertEquals(reservedFromVolume1, 0); + assertEquals(getExpectedDefaultReserved(hddsVolume1), reservedFromVolume1); OzoneConfiguration conf2 = new OzoneConfiguration(); @@ -152,6 +159,27 @@ public void testInvalidConfig() throws Exception { long reservedFromVolume2 = hddsVolume2.getVolumeInfo().get() .getReservedInBytes(); - assertEquals(reservedFromVolume2, 0); + assertEquals(getExpectedDefaultReserved(hddsVolume2), reservedFromVolume2); + } + + @Test + public void testPathsCanonicalized() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + + // Create symlink in folder (which is the root of the volume) + Path symlink = new File(temp.toFile(), "link").toPath(); + Files.createSymbolicLink(symlink, folder); + + // Use 
the symlink in the configuration. Canonicalization should still match it to folder used in the volume config. + conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, symlink + ":500B"); + HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); + + long reservedFromVolume = hddsVolume.getVolumeInfo().get().getReservedInBytes(); + assertEquals(500, reservedFromVolume); + } + + private long getExpectedDefaultReserved(HddsVolume volume) { + long totalCapacity = volume.getVolumeInfo().get().getUsageForTesting().realUsage().getCapacity(); + return (long) Math.ceil(totalCapacity * HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java index cc6fe87e19d8..1df26365531c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java @@ -36,6 +36,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -58,6 +59,10 @@ public void setup() throws Exception { String volume2 = baseDir + "disk2"; policy = new RoundRobinVolumeChoosingPolicy(); + // Use the exact capacity and availability specified in this test. Do not reserve space to prevent volumes from + // filling up. 
+ CONF.setFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, 0); + SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100); SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of( source1, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 1159d4277c78..68e687fefade 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -82,7 +82,7 @@ public void setup() throws Exception { volumes.add(volume1); volumes.add(volume2); conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dataDirKey); initializeVolumeSet(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index e3c610bfe47a..55df5f43b6b8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -45,6 +46,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -76,6 +78,9 @@ */ @Timeout(30) public class TestVolumeSetDiskChecks { + @TempDir + private Path tempDir; + public static final Logger LOG = LoggerFactory.getLogger( TestVolumeSetDiskChecks.class); @TempDir @@ -228,7 +233,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) { for (int i = 0; i < numDirs; ++i) { metaDirs.add(new File(dir, randomAlphanumeric(10)).toString()); } - ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + ozoneConf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", metaDirs)); final List dbDirs = new ArrayList<>(); @@ -302,11 +307,15 @@ public void testVolumeFailure() throws IOException { dummyChecker); KeyValueContainer container = new KeyValueContainer(data, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); conSet.addContainer(container); KeyValueContainer container1 = new KeyValueContainer(data1, conf); + 
StorageVolumeUtil.getHddsVolumesList(volumeSet1.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container1.create(volumeSet1, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); conSet.addContainer(container1); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 60dfe8509bda..b24a6f04c488 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; @@ -123,9 +122,7 @@ public void testKeyValueContainerCheckCorruption( assertFalse(block.getChunks().isEmpty()); ContainerProtos.ChunkInfo c = block.getChunks().get(0); BlockID blockID = block.getBlockID(); - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(c); - File chunkFile = getChunkLayout() - .getChunkFile(containerData, blockID, chunkInfo); + File chunkFile = getChunkLayout().getChunkFile(containerData, blockID, c.getChunkName()); long length = chunkFile.length(); assertThat(length).isGreaterThan(0); // forcefully truncate the file to induce failure. diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java index 16060a4c387a..12a659b7e443 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java @@ -17,11 +17,8 @@ */ package org.apache.hadoop.ozone.container.keyvalue; -import com.google.gson.Gson; -import com.google.gson.JsonArray; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import com.google.gson.JsonPrimitive; +import com.fasterxml.jackson.databind.JsonNode; +import org.apache.hadoop.hdds.JsonTestUtils; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; @@ -312,7 +309,7 @@ public void inspectThenRepairOnIncorrectContainer( String containerState = containerData.getState().toString(); // First inspect the container. - JsonObject inspectJson = runInspectorAndGetReport(containerData, + JsonNode inspectJson = runInspectorAndGetReport(containerData, KeyValueContainerMetadataInspector.Mode.INSPECT); checkJsonReportForIncorrectContainer(inspectJson, @@ -322,7 +319,7 @@ public void inspectThenRepairOnIncorrectContainer( checkDbCounts(containerData, setBlocks, setBytes, deleteCount); // Now repair the container. 
- JsonObject repairJson = runInspectorAndGetReport(containerData, + JsonNode repairJson = runInspectorAndGetReport(containerData, KeyValueContainerMetadataInspector.Mode.REPAIR); checkJsonReportForIncorrectContainer(repairJson, containerState, createdBlocks, setBlocks, createdBytes, setBytes, @@ -333,38 +330,36 @@ public void inspectThenRepairOnIncorrectContainer( } @SuppressWarnings("checkstyle:ParameterNumber") - private void checkJsonReportForIncorrectContainer(JsonObject inspectJson, + private void checkJsonReportForIncorrectContainer(JsonNode inspectJson, String expectedContainerState, long createdBlocks, long setBlocks, long createdBytes, long setBytes, long createdFiles, long setPendingDeleteCount, long createdPendingDeleteCount, boolean shouldRepair) { // Check main container properties. - assertEquals(inspectJson.get("containerID").getAsLong(), - CONTAINER_ID); - assertEquals(inspectJson.get("containerState").getAsString(), - expectedContainerState); + assertEquals(inspectJson.get("containerID").asLong(), CONTAINER_ID); + assertEquals(inspectJson.get("containerState").asText(), expectedContainerState); // Check DB metadata. - JsonObject jsonDbMetadata = inspectJson.getAsJsonObject("dBMetadata"); + JsonNode jsonDbMetadata = inspectJson.get("dBMetadata"); assertEquals(setBlocks, - jsonDbMetadata.get(OzoneConsts.BLOCK_COUNT).getAsLong()); + jsonDbMetadata.get(OzoneConsts.BLOCK_COUNT).asLong()); assertEquals(setBytes, - jsonDbMetadata.get(OzoneConsts.CONTAINER_BYTES_USED).getAsLong()); + jsonDbMetadata.get(OzoneConsts.CONTAINER_BYTES_USED).asLong()); // Check aggregate metadata values. - JsonObject jsonAggregates = inspectJson.getAsJsonObject("aggregates"); + JsonNode jsonAggregates = inspectJson.get("aggregates"); assertEquals(createdBlocks, - jsonAggregates.get("blockCount").getAsLong()); + jsonAggregates.get("blockCount").asLong()); assertEquals(createdBytes, - jsonAggregates.get("usedBytes").getAsLong()); + jsonAggregates.get("usedBytes").asLong()); assertEquals(createdPendingDeleteCount, - jsonAggregates.get("pendingDeleteBlocks").getAsLong()); + jsonAggregates.get("pendingDeleteBlocks").asLong()); // Check chunks directory. - JsonObject jsonChunksDir = inspectJson.getAsJsonObject("chunksDirectory"); - assertTrue(jsonChunksDir.get("present").getAsBoolean()); + JsonNode jsonChunksDir = inspectJson.get("chunksDirectory"); + assertTrue(jsonChunksDir.get("present").asBoolean()); assertEquals(createdFiles, - jsonChunksDir.get("fileCount").getAsLong()); + jsonChunksDir.get("fileCount").asLong()); // Check errors. checkJsonErrorsReport(inspectJson, "dBMetadata.#BLOCKCOUNT", @@ -376,48 +371,41 @@ private void checkJsonReportForIncorrectContainer(JsonObject inspectJson, } private void checkJsonErrorsReport( - JsonObject jsonReport, String propertyValue, + JsonNode jsonReport, String propertyValue, long correctExpected, long correctActual, boolean correctRepair) { if (correctExpected == correctActual) { return; } - checkJsonErrorsReport(jsonReport, propertyValue, - new JsonPrimitive(correctExpected), - new JsonPrimitive(correctActual), - correctRepair); + JsonNode correctExpectedNode = JsonTestUtils.valueToJsonNode(correctExpected); + JsonNode correctActualNode = JsonTestUtils.valueToJsonNode(correctActual); + + checkJsonErrorsReport(jsonReport, propertyValue, correctExpectedNode, + correctActualNode, correctRepair); } /** * Checks the erorr list in the provided JsonReport for an error matching * the template passed in with the parameters. 
*/ - private void checkJsonErrorsReport(JsonObject jsonReport, - String propertyValue, JsonPrimitive correctExpected, - JsonPrimitive correctActual, boolean correctRepair) { + private void checkJsonErrorsReport(JsonNode jsonReport, + String propertyValue, JsonNode correctExpected, + JsonNode correctActual, boolean correctRepair) { - assertFalse(jsonReport.get("correct").getAsBoolean()); + assertFalse(jsonReport.get("correct").asBoolean()); - JsonArray jsonErrors = jsonReport.getAsJsonArray("errors"); + JsonNode jsonErrors = jsonReport.get("errors"); boolean matchFound = false; - for (JsonElement jsonErrorElem: jsonErrors) { - JsonObject jsonErrorObject = jsonErrorElem.getAsJsonObject(); + for (JsonNode jsonErrorElem : jsonErrors) { String thisProperty = - jsonErrorObject.get("property").getAsString(); + jsonErrorElem.get("property").asText(); if (thisProperty.equals(propertyValue)) { matchFound = true; + assertEquals(correctExpected.asLong(), jsonErrorElem.get("expected").asLong()); + assertEquals(correctActual.asLong(), jsonErrorElem.get("actual").asLong()); - JsonPrimitive expectedJsonPrim = - jsonErrorObject.get("expected").getAsJsonPrimitive(); - assertEquals(correctExpected, expectedJsonPrim); - - JsonPrimitive actualJsonPrim = - jsonErrorObject.get("actual").getAsJsonPrimitive(); - assertEquals(correctActual, actualJsonPrim); - - boolean repaired = - jsonErrorObject.get("repaired").getAsBoolean(); + boolean repaired = jsonErrorElem.get("repaired").asBoolean(); assertEquals(correctRepair, repaired); break; } @@ -426,6 +414,7 @@ private void checkJsonErrorsReport(JsonObject jsonReport, assertTrue(matchFound); } + public void setDBBlockAndByteCounts(KeyValueContainerData containerData, long blockCount, long byteCount) throws Exception { setDB(containerData, blockCount, byteCount, @@ -496,20 +485,22 @@ void checkDbCounts(KeyValueContainerData containerData, } } - private JsonObject runInspectorAndGetReport( + private JsonNode runInspectorAndGetReport( KeyValueContainerData containerData, KeyValueContainerMetadataInspector.Mode mode) throws Exception { System.setProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY, mode.toString()); ContainerInspectorUtil.load(); - JsonObject json = runInspectorAndGetReport(containerData); + + JsonNode json = runInspectorAndGetReport(containerData); + ContainerInspectorUtil.unload(); System.clearProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY); return json; } - private JsonObject runInspectorAndGetReport( + private JsonNode runInspectorAndGetReport( KeyValueContainerData containerData) throws Exception { // Use an empty layout so the captured log has no prefix and can be // parsed as json. 
@@ -521,8 +512,11 @@ private JsonObject runInspectorAndGetReport( capturer.stopCapturing(); String output = capturer.getOutput(); capturer.clearOutput(); - - return new Gson().fromJson(output, JsonObject.class); + // Check if the output is effectively empty + if (output.trim().isEmpty()) { + return null; + } + return JsonTestUtils.readTree(output); } private KeyValueContainer createClosedContainer(int normalBlocks) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java index d2206a7fd680..47d24874749e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java @@ -76,7 +76,7 @@ public void testReadOversizeChunk() throws IOException { // write chunk bypassing size limit File chunkFile = getStrategy().getLayout() - .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo); + .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo.getChunkName()); FileUtils.writeByteArrayToFile(chunkFile, array); // WHEN+THEN diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java index f83216b7126e..27a0bc81d6f6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java @@ -67,7 +67,7 @@ public void testWriteChunkStageWriteAndCommit() throws Exception { long term = 0; long index = 0; File chunkFile = ContainerLayoutVersion.FILE_PER_CHUNK - .getChunkFile(container.getContainerData(), blockID, chunkInfo); + .getChunkFile(container.getContainerData(), blockID, chunkInfo.getChunkName()); File tempChunkFile = new File(chunkFile.getParent(), chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX @@ -109,7 +109,7 @@ public void deletesChunkFileWithLengthIncludingOffset() throws Exception { ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(), offset, chunkInfo.getLen()); File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile( - container.getContainerData(), blockID, chunkInfo); + container.getContainerData(), blockID, chunkInfo.getChunkName()); ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()), offset, chunkInfo.getLen(), null, true); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 7f38eab785b8..8fd7b6280b62 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -378,7 +378,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo) BlockUtils.shutdownCache(conf); 
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, datanodeDirs.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, datanodeDirs.toString()); MutableVolumeSet volumeSets = new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java index f11a7f5522c1..c4dca7c3498a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java @@ -35,7 +35,7 @@ import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.ON_DEMAND_VOLUME_BYTES_PER_SECOND_KEY; import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.VOLUME_BYTES_PER_SECOND_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test for {@link ContainerScannerConfiguration}. @@ -103,7 +103,7 @@ public void isCreatedWitDefaultValues() { ContainerScannerConfiguration csConf = conf.getObject(ContainerScannerConfiguration.class); - assertFalse(csConf.isEnabled()); + assertTrue(csConf.isEnabled()); assertEquals(METADATA_SCAN_INTERVAL_DEFAULT, csConf.getMetadataScanInterval()); assertEquals(DATA_SCAN_INTERVAL_DEFAULT, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 497418dcdcb9..07804c2a20bd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -165,7 +165,7 @@ public void testBuildNodeReport(ContainerTestVersionInfo versionInfo) throws Exception { initTest(versionInfo); String path = folder.toString(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", path + "/ratis1", path + "/ratis2", path + "ratis3")); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index f479ff93372d..03901b99be3b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -17,14 +17,51 @@ */ package org.apache.hadoop.ozone.container.replication; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.ratis.thirdparty.io.grpc.stub.CallStreamObserver; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import java.io.File; import java.io.IOException; import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.toTarget; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -34,6 +71,148 @@ */ class TestGrpcReplicationService { + @TempDir + private Path tempDir; + + private ReplicationServer replicationServer; + private OzoneConfiguration conf; + private ContainerController containerController; + private DatanodeDetails datanode; + private static final long CONTAINER_ID = 123456L; + private final AtomicLong pushContainerId = new AtomicLong(); + + @BeforeEach + public void setUp() throws Exception { + init(false); + } + + public void init(boolean isZeroCopy) throws Exception { + conf = new OzoneConfiguration(); + + ReplicationServer.ReplicationConfig replicationConfig = + conf.getObject(ReplicationServer.ReplicationConfig.class); + + replicationConfig.setZeroCopyEnable(isZeroCopy); + + SecurityConfig secConf = new SecurityConfig(conf); + + ContainerSet containerSet = new ContainerSet(1000); + + DatanodeDetails.Builder dn = + DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) + .setHostName("localhost").setIpAddress("127.0.0.1") + .setPersistedOpState(HddsProtos.NodeOperationalState.IN_SERVICE) + .setPersistedOpStateExpiry(0); + DatanodeDetails.Port containerPort = + 
DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); + DatanodeDetails.Port ratisPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + DatanodeDetails.Port replicationPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.REPLICATION, + replicationConfig.getPort()); + DatanodeDetails.Port streamPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + dn.addPort(containerPort); + dn.addPort(ratisPort); + dn.addPort(replicationPort); + dn.addPort(streamPort); + + datanode = dn.build(); + + final String testDir = + Files.createDirectory(tempDir.resolve("VolumeDir")).toString(); + + MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); + when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList( + new HddsVolume.Builder(testDir).conf(conf).build())); + + ContainerMetrics metrics = ContainerMetrics.create(conf); + Handler containerHandler = + new KeyValueHandler(conf, datanode.getUuidString(), containerSet, + volumeSet, metrics, c -> { + }); + + containerController = new ContainerController(containerSet, + Collections.singletonMap( + ContainerProtos.ContainerType.KeyValueContainer, containerHandler)); + + KeyValueContainerData data = new KeyValueContainerData( + CONTAINER_ID, + ContainerLayoutVersion.FILE_PER_BLOCK, GB, UUID.randomUUID().toString(), + datanode.getUuidString()); + KeyValueContainer container = new KeyValueContainer(data, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); + container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), + "test-replication"); + containerSet.addContainer(container); + container.close(); + + ContainerImporter importer = mock(ContainerImporter.class); + doAnswer(invocation -> { + pushContainerId.set((long) invocation.getArguments()[0]); + return null; + }).when(importer).importContainer(anyLong(), any(), any(), any()); + doReturn(true).when(importer).isAllowedContainerImport(eq( + CONTAINER_ID)); + when(importer.chooseNextVolume()).thenReturn(new HddsVolume.Builder( + Files.createDirectory(tempDir.resolve("ImporterDir")).toString()).conf( + conf).build()); + + replicationServer = + new ReplicationServer(containerController, replicationConfig, secConf, + null, importer, datanode.threadNamePrefix()); + replicationServer.start(); + } + + @AfterEach + public void cleanup() { + replicationServer.stop(); + } + + @Test + public void testDownload() throws IOException { + SimpleContainerDownloader downloader = + new SimpleContainerDownloader(conf, null); + Path downloadDir = Files.createDirectory(tempDir.resolve("DownloadDir")); + Path result = downloader.getContainerDataFromReplicas( + CONTAINER_ID, + Collections.singletonList(datanode), downloadDir, + CopyContainerCompression.NO_COMPRESSION); + + assertTrue(result.toString().startsWith(downloadDir.toString())); + + File[] files = downloadDir.toFile().listFiles(); + + assertNotNull(files); + assertEquals(files.length, 1); + + assertTrue(files[0].getName().startsWith("container-" + + CONTAINER_ID + "-")); + + downloader.close(); + } + + @Test + public void testUpload() { + ContainerReplicationSource source = + new OnDemandContainerReplicationSource(containerController); + + GrpcContainerUploader uploader = new GrpcContainerUploader(conf, null); + + PushReplicator pushReplicator 
= new PushReplicator(conf, source, uploader); + + ReplicationTask task = + new ReplicationTask(toTarget(CONTAINER_ID, datanode), pushReplicator); + + pushReplicator.replicate(task); + + assertEquals(pushContainerId.get(), CONTAINER_ID); + } + @Test void closesStreamOnError() { // GIVEN @@ -51,7 +230,7 @@ public void copyData(long containerId, OutputStream destination, }; ContainerImporter importer = mock(ContainerImporter.class); GrpcReplicationService subject = - new GrpcReplicationService(source, importer); + new GrpcReplicationService(source, importer, false); CopyContainerRequestProto request = CopyContainerRequestProto.newBuilder() .setContainerID(1) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java new file mode 100644 index 000000000000..00891cf3e24d --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import org.junit.jupiter.api.BeforeEach; + +/** + * Tests {@link GrpcReplicationService}. + */ +class TestGrpcReplicationServiceWithZeroCopy + extends TestGrpcReplicationService { + @BeforeEach + public void setUp() throws Exception { + init(true); + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 26c6853b64a6..1f69db78d625 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -38,6 +38,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import com.google.protobuf.Proto2Utils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -79,8 +80,8 @@ import static org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status.DONE; import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.fromSources; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; import static org.mockito.Mockito.any; @@ -478,11 +479,8 @@ private static class BlockingTask extends AbstractReplicationTask { @Override public void runTask() { runningLatch.countDown(); - try { - waitForCompleteLatch.await(); - } catch (InterruptedException e) { - fail("Interrupted waiting for the completion latch to be released"); - } + assertDoesNotThrow(() -> waitForCompleteLatch.await(), + "Interrupted waiting for the completion latch to be released"); setStatus(DONE); } } @@ -573,7 +571,7 @@ private static ECReconstructionCommandInfo createReconstructionCmd( new ReconstructECContainersCommand(containerId, sources, target, - missingIndexes, + Proto2Utils.unsafeByteString(missingIndexes), new ECReplicationConfig(3, 2)); return new ECReconstructionCommandInfo(cmd); @@ -607,13 +605,10 @@ public void replicate(ReplicationTask task) { UUID.randomUUID().toString(), UUID.randomUUID().toString()); KeyValueContainer kvc = new KeyValueContainer(kvcd, conf); - - try { + assertDoesNotThrow(() -> { set.addContainer(kvc); task.setStatus(DONE); - } catch (Exception e) { - fail("Unexpected error: " + e.getMessage()); - } + }); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java index f054358b35b4..baaf296f02ba 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java @@ -75,7 +75,7 @@ void testReceiveDataForExistingContainer() throws Exception { return null; }).when(observer).onError(any()); SendContainerRequestHandler sendContainerRequestHandler - = new SendContainerRequestHandler(containerImporter, observer); + = new SendContainerRequestHandler(containerImporter, observer, null); ByteString data = ByteString.copyFromUtf8("test"); ContainerProtos.SendContainerRequest request = ContainerProtos.SendContainerRequest.newBuilder() diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index 383e76dcc72a..23b7da263465 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -99,9 +99,9 @@ private void initTests(Boolean enable) throws Exception { conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); setup(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index 137214aa1cd6..59b88bcbea46 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource; @@ -532,6 +533,8 @@ public void restartDatanode(int expectedMlv, boolean exactMatch) // Start new datanode with the same configuration. 
dsm = new DatanodeStateMachine(dd, conf); + StorageVolumeUtil.getHddsVolumesList(dsm.getContainer().getVolumeSet().getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion(); if (exactMatch) { assertEquals(expectedMlv, mlv); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java index fbb338048cd1..f4e4ec6a2535 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.ozone.protocol.commands; +import com.google.protobuf.ByteString; +import com.google.protobuf.Proto2Utils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -28,7 +30,6 @@ import java.util.List; import java.util.stream.Collectors; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -40,7 +41,7 @@ public class TestReconstructionECContainersCommands { @Test public void testExceptionIfSourceAndMissingNotSameLength() { ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2); - byte[] missingContainerIndexes = {1, 2}; + final ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(new byte[]{1, 2}); List targetDns = new ArrayList<>(); targetDns.add(MockDatanodeDetails.randomDatanodeDetails()); @@ -52,7 +53,7 @@ public void testExceptionIfSourceAndMissingNotSameLength() { @Test public void protobufConversion() { - byte[] missingContainerIndexes = {1, 2}; + final ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(new byte[]{1, 2}); List srcNodesIndexes = new ArrayList<>(); for (int i = 0; i < srcNodesIndexes.size(); i++) { srcNodesIndexes.add(i + 1L); @@ -82,8 +83,7 @@ public void protobufConversion() { assertEquals(1L, proto.getContainerID()); assertEquals(sources, srcDnsFromProto); assertEquals(targets, targetDnsFromProto); - assertArrayEquals(missingContainerIndexes, - proto.getMissingContainerIndexes().toByteArray()); + assertEquals(missingContainerIndexes, proto.getMissingContainerIndexes()); assertEquals(ecReplicationConfig, new ECReplicationConfig(proto.getEcReplicationConfig())); @@ -96,8 +96,7 @@ public void protobufConversion() { fromProtobuf.getSources()); assertEquals(reconstructECContainersCommand.getTargetDatanodes(), fromProtobuf.getTargetDatanodes()); - assertArrayEquals( - reconstructECContainersCommand.getMissingContainerIndexes(), + assertEquals(reconstructECContainersCommand.getMissingContainerIndexes(), fromProtobuf.getMissingContainerIndexes()); assertEquals( reconstructECContainersCommand.getEcReplicationConfig(), diff --git a/hadoop-hdds/docs/.gitignore b/hadoop-hdds/docs/.gitignore new file mode 100644 index 000000000000..07b56370087f --- /dev/null +++ b/hadoop-hdds/docs/.gitignore @@ -0,0 +1,2 @@ +public +.hugo_build.lock diff --git a/hadoop-hdds/docs/content/design/overwrite-key-only-if-unchanged.md 
b/hadoop-hdds/docs/content/design/overwrite-key-only-if-unchanged.md new file mode 100644 index 000000000000..c4d4211cabfa --- /dev/null +++ b/hadoop-hdds/docs/content/design/overwrite-key-only-if-unchanged.md @@ -0,0 +1,199 @@ +--- +title: Overwriting an Ozone Key only if it has not changed. +summary: A minimal design illustrating how to replace a key in Ozone only if it has not changed since it was read. +date: 2024-04-05 +jira: HDDS-10657 +status: accepted +author: Stephen ODonnell +--- + + + + +Ozone offers write semantics where the last writer to commit a key wins. Therefore multiple writers can concurrently write the same key, and whichever commits last will effectively overwrite all data that came before it. + +As an extension of this, there is no "locking" on a key which is being replaced. + +For any key, but especially a large key, it can take significant time to read and write it. There are scenarios where it would be desirable to replace a key in Ozone, but only if the key has not changed since it was read. With the absence of a lock, such an operation is not possible today. + +## As Things Stand + +Internally, all Ozone keys have both an objectID and UpdateID which are stored in OM as part of the key metadata. + +Each time something changes on the key, whether it is data or metadata, the updateID is changed. It comes from the Ratis transactionID and is generally an increasing number. + +When an existing key is overwritten, its existing metadata including the ObjectID and ACLs are mirrored onto the new key version. The only metadata which is replaced is any custom metadata stored on the key by the user. Upon commit, the updateID is also changed to the current Ratis transaction ID. + +Writing a key in Ozone is a 3 step process: + +1. The key is opened via an Open Key request from the client to OM +2. The client writes data to the data nodes +3. The client commits the key to OM via a Commit Key call. + +Note that, as things stand, it is possible to lose metadata updates (eg ACL changes) when a key is overwritten: + +1. If the key exists, then a new copy of the key is opened for writing. +2. While the new copy is open, another process updates the ACLs for the key. +3. On commit, the new ACLs are not copied to the new key as the new key made a copy of the existing metadata at the time the key was opened. + +With the technique described in the next section, that problem is removed in this design, as the ACL update will change the updateID, and the key will not be committed. + +## Atomic Key Replacement + +In relational database applications, records are often assigned an update counter similar to the updateID for a key in Ozone. The data record can be read and displayed on a UI to be edited, and then written back to the database. However another user could have made an edit to the same record in the meantime, and if the record is written back without any checks, those edits could be lost. + +To combat this, "optimistic locking" is used. With optimistic locking, no locks are actually involved. The client reads the data along with the update counter. When it attempts to write the data back, it validates that the record has not changed by including the update counter in the update statement, eg: + +``` +update customerDetails +set +where customerID = :b1 +and updateCounter = :b2 +``` +If no records are updated, the application must display an error or reload the customer record to handle the problem.
+ +In Ozone the same concept can be used to perform an atomic update of a key only if it has not changed since the key details were originally read. + +To do this: + +1. The client reads the key details as usual. The key details can be extended to include the existing updateID as it is currently not passed to the client. This field already exists, but when exposed to the client it will be referred to as the key generation. +1. The client can inspect the read key details and decide if it wants to replace the key. +1. The client opens a new key for writing with the same key name as the original, passing the previously read generation in a new field. Call this new field expectedGeneration. +1. On OM, it receives the openKey request as usual and detects the presence of the expectedGeneration field. +1. On OM, it first ensures that a key is present with the given key name and having an updateID == expectedGeneration. If so, it opens the key and stores the details including the expectedGeneration in the openKeyTable. As things stand, the other existing key metadata copied from the original key is stored in the openKeyTable too. +1. The client continues to write the data as usual. This can be the same data in a different format (eg Ratis to EC conversion), or new data in the key depending on the application's needs. +1. On commit key, the client does not need to send the expectedGeneration again, as the open key contains it. +1. On OM, on commit key, it validates that the key still exists with the given key name and its stored updateID is unchanged when compared with the expectedGeneration. If so, the key is committed, otherwise an error is returned to the client. + +Note that any change to a key will change the updateID. This is existing behaviour, and committing a rewritten key will also modify the updateID. Note this also offers protection against concurrent rewrites. + +An optional enhancement for large keys is that on each block allocation the expectedGeneration can be checked against the current key version to ensure it has not changed. This would allow the rewrite to fail early if a large multi-block key is modified. + +### Alternative Proposal + +1. Pass the expectedGeneration to the rewrite API which passes it down to the relevant key stream, effectively saving it on the client +2. Client attaches the expectedGeneration to the commit request to indicate a rewrite instead of a put +3. OM checks the passed generation against the stored update ID and returns the corresponding success/fail result + +The advantage of this alternative approach is that it does not require the expectedGeneration to be stored in the openKey table. + +However, the client code required to implement this appears more complex due to having different key commit logic for Ratis and EC and the parameter needing to be passed through many method calls. + +PR [#5524](https://github.com/apache/ozone/pull/5524) illustrates this approach for the atomicKeyCreation feature which was added to S3. + +The existing implementation for key creation stores various attributes (metadata, creation time, ACLs, ReplicationConfig) in the openKey table, so storing the expectedGeneration keeps with that convention, which is less confusing for future developers. + +In terms of forward / backward compatibility, both solutions are equivalent. Only a new parameter is required within the KeyArgs passed to create and commit Key.
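+
+Under either proposal, the decisive OM-side step is the same: at commit time OM compares the generation supplied by the client with the updateID currently stored for the key, and rejects the commit if they differ. The snippet below is only an illustrative sketch of that check, not the actual handler code; in particular the `getExpectedGeneration()` accessor and the exact error code are assumptions of this design rather than existing APIs.
+
+```
+// Hypothetical sketch of the commit-time validation on OM (names are illustrative).
+OmKeyInfo existing = omMetadataManager.getKeyTable(bucketLayout)
+    .get(omMetadataManager.getOzoneKey(volumeName, bucketName, keyName));
+if (existing == null || existing.getUpdateID() != keyArgs.getExpectedGeneration()) {
+  // The key was deleted or modified since the client read it, so the rewrite is rejected.
+  throw new OMException("Key changed during rewrite", OMException.ResultCodes.KEY_NOT_FOUND);
+}
+// Otherwise commit as normal; the commit itself moves the updateID forward.
+```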
+ +If an upgraded server is rolled back, it will still be able to deal with an openKey entry containing expectedGeneration, but it will not process it atomically. + +### Scope + +The intention is to first implement this for OBS buckets, then address FSO buckets. FSO bucket handling will reuse the same fields, but the handlers on OM are different. We also need to decide on what should happen if a key is renamed or moved to a different folder during the rewrite. + +Multi-part keys need more investigation and hence are also excluded in the initial version. + +## Changes Required + +In order to enable the above steps on Ozone, several small changes are needed. + +### Wire Protocol + +1. The expectedGeneration needs to be added to the KeyInfo protobuf object so it can be stored in the openKey table. +2. The expectedGeneration needs to be added to the keyArgs protobuf object, which is passed from the client to OM when creating a key. + +No new messages need to be defined. + +### On OM + +No new OM handlers are needed. The existing OpenKey and CommitKey handlers will receive the new expectedGeneration and perform the checks. + +No new locks are needed on OM. As part of the openKey and commitKey, there are existing locks taken to ensure the key open / commit is atomic. The new checks are performed under those locks, and come down to a couple of long comparisons, so they add negligible overhead. + +### On The Client + + 1. We need to allow the updateID (called generation on the client) of an existing key to be accessible when an existing key's details are read, by adding it to OzoneKey and OzoneKeyDetails. These are internal object changes and do not impact any APIs. + 2. To pass the expectedGeneration to OM on key open, it would be possible to overload the existing OzoneBucket.createKey() method, which already has several overloaded versions, or create a new explicit method on OzoneBucket called rewriteKey, passing the expectedGeneration, eg: + + ``` + public OzoneOutputStream rewriteKey(String volumeName, String bucketName, String keyName, long size, long expectedGeneration, ReplicationConfig replicationConfigOfNewKey) + throws IOException + +// Can also add an overloaded version of these methods to pass a metadata map, as with the existing +// create key method. + ``` +This specification is roughly in line with the existing createKey method: + +``` + public OzoneOutputStream createKey( + String volumeName, String bucketName, String keyName, long size, + ReplicationConfig replicationConfig, + Map metadata) +``` + +An alternative is to create a new overloaded createKey, but it is probably less confusing to have the new rewriteKey method: + +``` + public OzoneOutputStream createKey( + String volumeName, String bucketName, String keyName, long size, + ReplicationConfig replicationConfig, long expectedUpdateID) +``` + +The intended usage of this API is that the existing key details are read, perhaps inspected, and then used to open the new key, and then data is written. In this example, the key is overwritten with the same data in a different replication format. Equally, the key could be rewritten with the original data modified in some application-specific way. The atomic check guarantees against lost updates if another application thread is attempting to update the same key in a different way. + +``` +OzoneKeyDetails existingKey = bucket.getKey(keyName); +// Inspect the key and decide if overwrite is desired: +boolean shouldOverwrite = ...
+if (shouldOverwrite) { + try (OutputStream os = bucket.rewriteKey(existingKey.getVolumeName(), existingKey.getBucketName(), + existingKey.getName(), existingKey.getDataSize(), existingKey.getGeneration(), newRepConfig)) { + IOUtils.copy(bucket.readKey(keyName), os); + } +} +``` + +## Upgrade and Compatibility + +### Client Server Protocol + +If a newer client is talking to an older server, it could call the new atomic API but the server will ignore it without error. The client server versioning framework can be used to avoid this problem. + +No new protobuf messages are needed, and hence no new Client to OM APIs, as the existing APIs are used with an additional parameter. + +A single extra field is added to the KeyArgs object, which is passed from the client to OM on key open and commit. This is a new field, so it will be null if not set, and the server will ignore it if it does not expect it. + +### Disk Layout + +A single extra field is added to the OMKeyInfo object which is stored in the openKey table. This is a new field, so it will be null if not set, and the server will ignore it if it does not expect it. + +There should be no impact on upgrade / downgrade with the new field added in this way. + +## Other Storage Systems + +Amazon S3 does not offer a facility like this. + +Google Cloud has a concept of a generationID which is used in various [API calls](https://cloud.google.com/storage/docs/json_api/v1/objects/update). + +## Further Ideas + +The intention of this initial design is to make as few changes to Ozone as possible to enable overwriting a key if it has not changed. + +It would be possible to have separate generation IDs for metadata changes and data changes to give a more fine-grained approach. + +It would also be possible to expose these IDs over the S3 interface as well as the Java interface. + +However, both these options require more changes to Ozone and more API surface to test and support. + +The changes suggested here are small, and carry little risk to existing operations if the new field is not passed. They also do not rule out extending the idea to cover a separate metadata generation if such a thing is desired by enough users. diff --git a/hadoop-hdds/docs/content/feature/Decommission.md b/hadoop-hdds/docs/content/feature/Decommission.md index 86a345a460be..8058c0c0902e 100644 --- a/hadoop-hdds/docs/content/feature/Decommission.md +++ b/hadoop-hdds/docs/content/feature/Decommission.md @@ -51,6 +51,14 @@ ozone admin datanode decommission [-hV] [-id=] ``` You can enter multiple hosts to decommission multiple datanodes together. +To view the status of a decommissioning datanode, you can execute the following command: + +```shell +ozone admin datanode status decommission [-hV] [-id=] [--scm=] [--id=] [--ip=] +``` +You can pass the IP address or UUID of one datanode to view only the details related to that datanode.
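+
+For example, to view the progress of a single datanode you can pass its UUID with the `--id` option (the value below is a placeholder):
+
+```shell
+ozone admin datanode status decommission --id=<datanode-uuid>
+```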
+ + **Note:** To recommission a datanode you may execute the below command in cli, ```shell ozone admin datanode recommission [-hV] [-id=] diff --git a/hadoop-hdds/docs/content/feature/Decommission.zh.md b/hadoop-hdds/docs/content/feature/Decommission.zh.md index ad959469b953..231539fe0d1b 100644 --- a/hadoop-hdds/docs/content/feature/Decommission.zh.md +++ b/hadoop-hdds/docs/content/feature/Decommission.zh.md @@ -50,6 +50,14 @@ ozone admin datanode decommission [-hV] [-id=] ``` 您可以输入多个主机,以便一起Decommission多个DataNode。 +查看 Decommission时datanode 的状态,可以执行下面的命令, + +```shell +ozone admin datanode status decommission [-hV] [-id=] [--scm=] [--id=] [--ip=] +``` +您可以指定一个 Datanode 的 IP address 或 UUID 以查看该 Datanode 相关的详细信息。 + + **Note:** 要Recommission某台DataNode的时候,可在命令行执行以下命令, ```shell ozone admin datanode recommission [-hV] [-id=] diff --git a/hadoop-hdds/docs/content/feature/ErasureCoding.md b/hadoop-hdds/docs/content/feature/ErasureCoding.md index 77866762f6d3..c4d3739f1dcd 100644 --- a/hadoop-hdds/docs/content/feature/ErasureCoding.md +++ b/hadoop-hdds/docs/content/feature/ErasureCoding.md @@ -174,7 +174,9 @@ the configuration keys `ozone.server.default.replication.type` and `ozone.server ozone.server.default.replication.type EC +``` +```XML ozone.server.default.replication RS-6-3-1024k @@ -208,6 +210,22 @@ We can pass the EC Replication Config while creating the keys irrespective of bu ozone sh key put --type EC --replication rs-6-3-1024k ``` +When using ofs/o3fs, we can pass the EC Replication Config by setting the configuration keys `ozone.replication.type` and `ozone.replication`. + +```XML + + ozone.replication.type + EC + +``` + +```XML + + ozone.replication + rs-3-2-1024k + +``` + In the case bucket already has default EC Replication Config, there is no need of passing EC Replication Config while creating key. ### Enable Intel ISA-L diff --git a/hadoop-hdds/docs/content/feature/Reconfigurability.md b/hadoop-hdds/docs/content/feature/Reconfigurability.md index af220554ba82..8aa0579139de 100644 --- a/hadoop-hdds/docs/content/feature/Reconfigurability.md +++ b/hadoop-hdds/docs/content/feature/Reconfigurability.md @@ -28,10 +28,11 @@ If a property is reconfigurable, you can modify it in the configuration file (`o command: ```shell -ozone admin reconfig --address= start|status|properties +ozone admin reconfig --service=[OM|SCM|DATANODE] --address= start|status|properties ``` The meaning of command options: +- **--service**: The node type of the server specified with --address - **--address**: RPC address for one server - Three operations are provided: - **start**: Execute the reconfig operation asynchronously @@ -40,60 +41,60 @@ The meaning of command options: ## Retrieve the reconfigurable properties list To retrieve all the reconfigurable properties list for a specific component in Ozone, -you can use the command: `ozone admin reconfig --address= properties`. +you can use the command: `ozone admin reconfig --service=[OM|SCM|DATANODE] --address= properties`. This command will list all the properties that can be dynamically reconfigured at runtime for specific component.
> For example, get the Ozone OM reconfigurable properties list. > ->$ `ozone admin reconfig --address=hadoop1:9862 properties`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## OM Reconfigurability >For example, modify `ozone.administrators` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:9862 start`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 start`
OM: Started OM reconfiguration task on node [hadoop1:9862]. > ->$ `ozone admin reconfig --address=hadoop1:9862 status`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 status`
OM: Reconfiguring status for node [hadoop1:9862]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig -address=hadoop1:9862 properties`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`&#13;
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## SCM Reconfigurability >For example, modify `ozone.administrators` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:9860 start`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 start`
SCM: Started OM reconfiguration task on node [hadoop1:9860]. > ->$ `ozone admin reconfig --address=hadoop1:9860 status`
+>$ `ozone admin reconfig --service=SCM --address=hadoop1:9860 status`
SCM: Reconfiguring status for node [hadoop1:9860]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig -address=hadoop1:9860 properties`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 properties`&#13;
SCM: Node [hadoop1:9860] Reconfigurable properties:
ozone.administrators ## Datanode Reconfigurability >For example, modify `ozone.example.config` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:19864 start`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 start`
Datanode: Started reconfiguration task on node [hadoop1:19864]. > ->$ `ozone admin reconfig --address=hadoop1:19864 status`
+>$ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 status`
Datanode: Reconfiguring status for node [hadoop1:19864]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.example.config
From: "old"
To: "new" > -> $ `ozone admin reconfig -address=hadoop1:19864 properties`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 properties`&#13;
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config @@ -104,7 +105,7 @@ Currently, only Datanode supports batch operations
>For example, to list the reconfigurable properties of all Datanodes:
-> $ `ozone admin reconfig --in-service-datanodes properties`
+> $ `ozone admin reconfig --service=DATANODE --in-service-datanodes properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config
Datanode: Node [hadoop2:19864] Reconfigurable properties:
diff --git a/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md b/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md index 8e983a98ab8d..957f0510548e 100644 --- a/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md +++ b/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md @@ -27,10 +27,11 @@ Ozone支持在不重启服务的情况下动态加载某些配置。如果某个 命令: ```shell -ozone admin reconfig --address= start|status|properties +ozone admin reconfig --service=[OM|SCM|DATANODE] --address= start|status|properties ``` 命令项的含义: +- **--service**: --address 指定节点的Ozone服务类型 - **--address**: 一台服务所在的主机与客户端通信的RPC地址 - 提供3中操作: - **start**: 开始异步执行动态加载配置 @@ -38,44 +39,44 @@ ozone admin reconfig --address= start|status|properties - **properties**: 列出支持动态加载的配置项 ## 获取可动态加载的属性列表 -要获取 Ozone 中指定组件的可动态加载属性列表, 可以使用命令 `ozone admin reconfig --address= properties`。 +要获取 Ozone 中指定组件的可动态加载属性列表, 可以使用命令 `ozone admin reconfig --service=[OM|SCM|DATANODE] --address= properties`。 这个命令将会列出所有可以在运行时动态加载的属性。 > 例如, 获取 Ozone OM 可动态加载属性列表 > ->$ `ozone admin reconfig --address=hadoop1:9862 properties`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## OM动态配置 >例如, 在`ozone-site.xml`文件中修改`ozone.administrators`的值并执行: > -> $ `ozone admin reconfig --address=hadoop1:9862 start`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 start`
OM: Started reconfiguration task on node [hadoop1:9862]. > ->$ `ozone admin reconfig --address=hadoop1:9862 status`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 status`
OM: Reconfiguring status for node [hadoop1:9862]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig --address=hadoop1:9862 properties`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators

## SCM动态配置

>例如, 在`ozone-site.xml`文件中修改`ozone.administrators`的值并执行:
>
-> $ `ozone admin reconfig --address=hadoop1:9860 start`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 start`
SCM: Started reconfiguration task on node [hadoop1:9860]. > ->$ `ozone admin reconfig --address=hadoop1:9860 status`
+>$ `ozone admin reconfig --service=SCM --address=hadoop1:9860 status`
SCM: Reconfiguring status for node [hadoop1:9860]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig --address=hadoop1:9860 properties`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 properties`
SCM: Node [hadoop1:9860] Reconfigurable properties:
ozone.administrators
@@ -83,16 +84,16 @@ ozone.administrators

## Datanode 动态配置

>例如, 在`ozone-site.xml`文件中修改`ozone.example.config`的值并执行:
>
-> $ `ozone admin reconfig --address=hadoop1:19864 start`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 start`
Datanode: Started reconfiguration task on node [hadoop1:19864]. > ->$ `ozone admin reconfig --address=hadoop1:19864 status`
+>$ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 status`
Datanode: Reconfiguring status for node [hadoop1:19864]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.example.config
From: "old"
To: "new" > -> $ `ozone admin reconfig --address=hadoop1:19864 properties`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config
@@ -104,7 +105,7 @@ ozone.example.config
>例如, 列出 Datanode 所有可配置的属性:
-> $ `ozone admin reconfig --in-service-datanodes properties`
+> $ `ozone admin reconfig --service=DATANODE --in-service-datanodes properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config
Datanode: Node [hadoop2:19864] Reconfigurable properties:
diff --git a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md index f9ea5f608461..23c015515035 100644 --- a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md +++ b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md @@ -432,7 +432,7 @@ bash-4.2$ aws s3api --endpoint-url http://s3g:9878 list-objects --bucket bucket- { "Key": "file1", "LastModified": "2022-02-16T00:10:00.000Z", - "ETag": "2022-02-16T00:10:00.000Z", + "ETag": "e99f93dedfe22e9a133dc3c634f14634", "Size": 3811, "StorageClass": "STANDARD" } diff --git a/hadoop-hdds/docs/content/feature/Snapshot.md b/hadoop-hdds/docs/content/feature/Snapshot.md index 880176ec669e..143a1a5f918f 100644 --- a/hadoop-hdds/docs/content/feature/Snapshot.md +++ b/hadoop-hdds/docs/content/feature/Snapshot.md @@ -73,5 +73,5 @@ Ozone also provides SnapshotDiff API. Whenever a user issues a SnapshotDiff betw Snapshot feature places additional demands on the cluster in terms of CPU, memory and storage. Cluster nodes running Ozone Managers and Ozone Datanodes should be configured with extra storage capacity depending on the number of active snapshots that the user wants to keep. Ozone Snapshots consume incremental amount of space per snapshot. e.g. if the active object store has 100 GB data (before replication) and a snapshot is taken, then the 100 GB of space will be locked in that snapshot. If the active object store consumes another 10 GB of space (before replication) subsequently then overall space requirement would be 100 GB + 10 GB = 110 GB in total (before replication). This is because common keys between Ozone snapshots and the active object store will share the storage space. -Similarly, nodes running Ozone Manager should be configured with extra memory depending on how many snapshots are concurrently read from. This also depepnds on how many concurrent SnapshotDiff jobs are expected in the cluster. By default, an Ozone Manager allows 10 concurrent SnapshotDiff jobs at a time, which can be increased in config. +Similarly, nodes running Ozone Manager should be configured with extra memory depending on how many snapshots are concurrently read from. This also depends on how many concurrent SnapshotDiff jobs are expected in the cluster. By default, an Ozone Manager allows 10 concurrent SnapshotDiff jobs at a time, which can be increased in config. diff --git a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md index 5f55afebc3c8..e48a95c8bb9c 100644 --- a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md +++ b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md @@ -43,7 +43,7 @@ Set the following properties to the Ozone configuration file `ozone-site.xml`. - To enable the Streaming Write Pipeline feature, set the following property to true. ```XML - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled false OZONE, CONTAINER, RATIS, DATASTREAM It specifies whether to enable data stream of container. @@ -52,7 +52,7 @@ Set the following properties to the Ozone configuration file `ozone-site.xml`. - Datanodes listen to the following port for the streaming traffic. ```XML - dfs.container.ratis.datastream.port + hdds.container.ratis.datastream.port 9855 OZONE, CONTAINER, RATIS, DATASTREAM The datastream port number of container. 
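Because the Streaming Write Pipeline keys above were renamed from the `dfs.` prefix to `hdds.`, it is worth confirming which name a running cluster actually resolves before relying on the feature. A minimal sketch — the `ozone getconf -confKey` flag is assumed to mirror `hdfs getconf` and may not exist in every release, so a plain grep of `ozone-site.xml` is shown as a fallback:

```shell
# Assumption: `ozone getconf -confKey` behaves like HDFS getconf; if your
# release rejects the flag, the grep fallback just inspects the config file
# directly (adjust the path to your installation).
ozone getconf -confKey hdds.container.ratis.datastream.enabled \
  || grep -A 1 "hdds.container.ratis.datastream" "$OZONE_CONF_DIR/ozone-site.xml"
```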
diff --git a/hadoop-hdds/docs/content/interface/Cli.zh.md b/hadoop-hdds/docs/content/interface/Cli.zh.md new file mode 100644 index 000000000000..aa34a9245710 --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Cli.zh.md @@ -0,0 +1,311 @@ +--- +title: 命令行接口 +weight: 4 +menu: + main: + parent: "客户端接口" +--- + + + +Ozone shell 是从命令行与 Ozone 交互的主要接口。在后台,它调用 [Java API]({{< ref "interface/JavaApi.md">}}). + +有些功能只能通过使用 `ozone sh` 命令才能访问。例如: + +1. 创建带有配额的卷 +2. 管理内部 ACL +3. 创建带有加密的键的桶 + +所有这些命令都是一次性的管理任务。应用程序也可以使用其他接口,如 Hadoop 兼容文件系统(o3fs 或 ofs)或 S3 接口来实现相同功能而无需使用 Ozone 命令行接口。 + + +Ozone shell 的帮助菜单可以在 _对象_ 级别 或者 _动作_ 级别被调出. + +示例命令: + +```bash +ozone sh volume --help +``` + +这条命令展示了卷的所有可用的 _动作_ 命令 + +或者也可以用来解释具体的某个 _动作_ ,例如: + +```bash +ozone sh volume create --help +``` + +这条命令输出卷的`create`动作的所有命令行选项 + +## 通用命令格式 + +Ozone shell 命令采取以下形式: + +> _ozone sh object action url_ + +**ozone** 脚本用于调用所有 Ozone 子命令。通过 ```sh``` 命令调用 ozone shell 命令。 + +对象可以是卷、桶或键。动作可以是创建、列出、删除等。 + +根据动作,Ozone URL 可以指向以下格式的卷、桶或键: + +_\[schema\]\[server:port\]/volume/bucket/key_ + + +其中, + +1. **Schema** - 应为 `o3`,这是访问 Ozone API 的原生 RPC 协议。是否指定 schema 是可选的。 + +2. **Server:Port** - 应为 Ozone Manager 的地址。如果不指定端口,则将使用 ozone-site.xml 中的默认端口。 + +请查看卷命令、桶命令和键命令部分了解更多详情。 + +## 卷操作 + +卷位于层次结构的顶层,仅由管理员管理。也可以指定所有者用户和配额。 + +示例命令: + +```shell +$ ozone sh volume create /vol1 +``` + +```shell +$ ozone sh volume info /vol1 +{ + "metadata" : { }, + "name" : "vol1", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-28T12:31:50.112Z", + "modificationTime" : "2020-07-28T12:31:50.112Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +} +``` + +```shell +$ ozone sh volume list / +[ { + "metadata" : { }, + "name" : "s3v", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-27T11:32:22.314Z", + "modificationTime" : "2020-07-27T11:32:22.314Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +}, { + .... +} ] +``` + +如果卷为空,我们可以使用以下命令删除卷。 + +```shell +$ ozone sh volume delete /vol1 +Volume vol1 is deleted +``` +如果卷包含任意桶或键,我们可以递归地删除卷。这将删除卷中所有的桶和键,然后删除卷本身。在运行这个命令后,将无法恢复已删除的内容。 + +```shell +$ ozone sh volume delete -r /vol1 +This command will delete volume recursively. +There is no recovery option after using this command, and no trash for FSO buckets. +Delay is expected running this command. +Enter 'yes' to proceed': yes +Volume vol1 is deleted +``` + +## 桶操作 + +桶是层次结构的第二层级,与 AWS S3 桶相似。如果用户有必要的权限,可以在卷中创建桶。 + +示例命令: + +```shell +$ ozone sh bucket create /vol1/bucket1 +``` + +```shell +$ ozone sh bucket info /vol1/bucket1 +{ + "metadata" : { }, + "volumeName" : "vol1", + "name" : "bucket1", + "storageType" : "DISK", + "versioning" : false, + "creationTime" : "2020-07-28T13:14:45.091Z", + "modificationTime" : "2020-07-28T13:14:45.091Z", + "encryptionKeyName" : null, + "sourceVolume" : null, + "sourceBucket" : null +} +``` + +如果桶是空的,我们可以用以下命令来删除桶。 + +```shell +$ ozone sh bucket delete /vol1/bucket1 +Bucket bucket1 is deleted +``` + +如果桶包含任意键,我们可以递归地删除桶。这将删除桶中的所有键,然后删除桶本身。在运行这个命令后,将无法恢复已删除的内容。 + +```shell +$ ozone sh bucket delete -r /vol1/bucket1 +This command will delete bucket recursively. 
+There is no recovery option after using this command, and deleted keys won't move to trash. +Enter 'yes' to proceed': yes +Bucket bucket1 is deleted +``` +[透明数据加密]({{< ref "security/SecuringTDE.md" >}}) 可以在桶层级被启用。 + +## 键操作 + +键是可以存储数据的对象。 + +```shell +$ ozone sh key put /vol1/bucket1/README.md README.md +``` + +

+ + + +```shell +$ ozone sh key info /vol1/bucket1/README.md +{ + "volumeName" : "vol1", + "bucketName" : "bucket1", + "name" : "README.md", + "dataSize" : 3841, + "creationTime" : "2020-07-28T13:17:20.749Z", + "modificationTime" : "2020-07-28T13:17:21.979Z", + "replicationType" : "RATIS", + "replicationFactor" : 1, + "ozoneKeyLocations" : [ { + "containerID" : 1, + "localID" : 104591670688743424, + "length" : 3841, + "offset" : 0 + } ], + "metadata" : { }, + "fileEncryptionInfo" : null +} +``` + +```shell +$ ozone sh key get /vol1/bucket1/README.md /tmp/ +``` + +```shell +$ ozone sh key delete /vol1/bucket1/key1 +``` + + +如果键是在 [FSO]({{< ref "feature/PrefixFSO.zh.md">}}) 桶中,当删除键时它会被移动到回收站,回收站的位置是: +```shell +$ ///.Trash/ +``` +如果键是在OBS桶中,它将被永久删除。 + +## 查询命令行结果 + +Ozone命令行返回JSON响应。[jq](https://stedolan.github.io/jq/manual/) 是一个命令行JSON处理器,可以用来过滤CLI结果以获取所需信息. + +示例命令: + +* 列出不是链接的 FSO 桶。 +```shell +$ ozone sh bucket list /s3v | jq '.[] | select(.link==false and .bucketLayout=="FILE_SYSTEM_OPTIMIZED")' +{ + "metadata": {}, + "volumeName": "s3v", + "name": "fso-bucket", + "storageType": "DISK", + "versioning": false, + "usedBytes": 0, + "usedNamespace": 0, + "creationTime": "2023-02-01T05:18:46.974Z", + "modificationTime": "2023-02-01T05:18:46.974Z", + "quotaInBytes": -1, + "quotaInNamespace": -1, + "bucketLayout": "FILE_SYSTEM_OPTIMIZED", + "owner": "om", + "link": false +} +``` + +* 列出 EC 桶以及它们的复制策略配置。 +```shell +$ ozone sh bucket list /vol1 | jq -r '.[] | select(.replicationConfig.replicationType == "EC") | {"name": .name, "replicationConfig": .replicationConfig}' +{ + "name": "ec5", + "replicationConfig": { + "data": 3, + "parity": 2, + "ecChunkSize": 1048576, + "codec": "RS", + "replicationType": "EC", + "requiredNodes": 5 + } +} +{ + "name": "ec9", + "replicationConfig": { + "data": 6, + "parity": 3, + "ecChunkSize": 1048576, + "codec": "RS", + "replicationType": "EC", + "requiredNodes": 9 + } +} +``` + +* 以制表符分隔的格式列出加密桶的名字以及它们的加密的键名。 +```shell + +$ ozone sh bucket list /vol1 | jq -r '.[] | select(.encryptionKeyName != null) | [.name, .encryptionKeyName] | @tsv' +ec5 key1 +encrypted-bucket key1 +``` diff --git a/hadoop-hdds/docs/content/interface/HttpFS.md b/hadoop-hdds/docs/content/interface/HttpFS.md index e413faf03cde..cebe0d315b02 100644 --- a/hadoop-hdds/docs/content/interface/HttpFS.md +++ b/hadoop-hdds/docs/content/interface/HttpFS.md @@ -84,7 +84,7 @@ Truncate a File | not implemented in Ozone Status of a File/Directory | supported List a Directory | supported List a File | supported -Iteratively List a Directory | supported +Iteratively List a Directory | unsupported ### Other File System Operations diff --git a/hadoop-hdds/docs/content/interface/Ofs.zh.md b/hadoop-hdds/docs/content/interface/Ofs.zh.md new file mode 100644 index 000000000000..25d7039f49ac --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Ofs.zh.md @@ -0,0 +1,249 @@ +--- +title: Ofs (兼容 Hadoop 的文件系统) +date: 2017-09-14 +weight: 1 +menu: + main: + parent: "编程接口" +summary: Hadoop Compatible file system allows any application that expects an HDFS like interface to work against Ozone with zero changes. Frameworks like Apache Spark, YARN and Hive work against Ozone without needing any change. 
**Global level view.** +--- + + +兼容 Hadoop 的文件系统 (HCFS) 接口允许像 Ozone 这样的存储后端轻松集成到 Hadoop 生态系统中。Ozone 文件系统 (OFS) 是一个兼容 Hadoop 的文件系统。 + + + + +## 基础知识 + +有效的 OFS 路径示例: + +``` +ofs://om1/ +ofs://om3:9862/ +ofs://omservice/ +ofs://omservice/volume1/ +ofs://omservice/volume1/bucket1/ +ofs://omservice/volume1/bucket1/dir1 +ofs://omservice/volume1/bucket1/dir1/key1 + +ofs://omservice/tmp/ +ofs://omservice/tmp/key1 +``` + +在 OFS 文件系统中,卷和挂载点位于根目录级别。卷的下一级是桶。每个桶下面是键和目录。 + +请注意,对于挂载点,目前仅支持临时挂载 /tmp。 + +## 配置 + +请在 `core-site.xml` 添加下列配置。 + +{{< highlight xml >}} + + fs.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + + + fs.defaultFS + ofs://om-host.example.com/ + +{{< /highlight >}} + +这将使所有的卷和桶成为默认的 Hadoop 兼容文件系统,并注册 ofs 文件系统类型。 + +您还需要将 ozone-filesystem-hadoop3.jar 文件添加到 classpath 中: + +{{< highlight bash >}} +export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH +{{< /highlight >}} + +(请注意: 在 Hadoop 2.x 中, 请使用 `ozone-filesystem-hadoop2-*.jar`) + +当默认的文件系统被建立,用户可以运行命令例如ls,put,mkdir等。 +例如: + +{{< highlight bash >}} +hdfs dfs -ls / +{{< /highlight >}} + +请注意,ofs 对所有桶和卷都有效。用户可以使用 mkdir 创建桶和卷,例如创建名为 volume1 的卷和名为 bucket1 的桶 + +{{< highlight bash >}} +hdfs dfs -mkdir /volume1 +hdfs dfs -mkdir /volume1/bucket1 +{{< /highlight >}} + +或者使用 put 命令向桶中写入一个文件 + +{{< highlight bash >}} +hdfs dfs -put /etc/hosts /volume1/bucket1/test +{{< /highlight >}} + +有关更多用法,请参见: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf + +## 与 [o3fs]({{< ref "interface/O3fs.md" >}}) 的区别 + +### 创建文件 + +OFS 不允许直接在根目录或卷下创建键(文件)。 +当用户尝试这样做时,他们将收到一个错误消息: + +```bash +$ ozone fs -touch /volume1/key1 +touch: Cannot create file under root or volume. +``` + +### 简化 fs.defaultFS + +使用 OFS 时,fs.defaultFS(在 core-site.xml 中)不再需要像 o3fs 那样在其路径中具有特定的卷和桶。 +只需设置 OM 主机名或 service ID(在 HA 的情况下): + + +```xml + + fs.defaultFS + ofs://omservice + +``` + +客户端将能够访问集群上的所有卷和桶,而无需指定主机名或 service ID。 + +```bash +$ ozone fs -mkdir -p /volume1/bucket1 +``` + +### 通过 FileSystem shell 直接管理卷和桶 + +管理员可以通过 Hadoop FS shell 轻松创建和删除卷和桶。卷和桶被视为类似于目录,因此如果它们不存在,可以使用 `-p` 创建: + +```bash +$ ozone fs -mkdir -p ofs://omservice/volume1/bucket1/dir1/ +``` +请注意,卷和桶名称字符集规则仍然适用。例如,桶和卷名称不接受下划线(`_`): + +```bash +$ ozone fs -mkdir -p /volume_1 +mkdir: Bucket or Volume name has an unsupported character : _ +``` + +## 挂载点和设置 /tmp + +为了与使用 /tmp/ 的传统 Hadoop 应用程序兼容,我们在 FS 的根目录有一个特殊的临时目录挂载点。 +这个功能将来可能会扩展,以支持自定义挂载路径。 + +目前 Ozone 支持两种 /tmp 的配置。第一种(默认)是每个用户的临时目录, +由一个挂载卷和一个用户特定的临时桶组成。第二种(通过 ozone-site.xml 配置) +是一个类似粘滞位的临时目录,对所有用户共用,由一个挂载卷和一个共用的临时桶组成。 + +重要提示:要使用它,首先,**管理员** 需要创建名为 tmp 的卷(卷名目前是硬编码的)并将其 ACL 设置为 world ALL 访问权限。 + +具体来说: + +```bash +$ ozone sh volume create tmp +$ ozone sh volume setacl tmp -al world::a +``` + +每个集群中这些命令**仅需要执行一次** + +### 对于每个用户的 /tmp 目录 (默认) + +**每个用户** 都需要先创建并初始化他们自己的 temp 桶一次 + +```bash +$ ozone fs -mkdir /tmp +2020-06-04 00:00:00,050 [main] INFO rpc.RpcClient: Creating Bucket: tmp/0238 ... 
+``` + +在此之后用户可以向该目录写入,就和向其他常规目录写入一样。例如: + +```bash +$ ozone fs -touch /tmp/key1 +``` + +### 对于所有用户共享的 /tmp 目录 + +要启用类似粘滞位的共享 /tmp 目录,请在 ozone-site.xml 中更新以下配置: + +```xml + + ozone.om.enable.ofs.shared.tmp.dir + true + +``` + +然后,在以**管理员**身份设置好 tmp 卷之后,还需要配置一个 tmp 桶,作为所有用户的共享 /tmp 目录,例如: + +```bash +$ ozone sh bucket create /tmp/tmp +$ ozone sh volume setacl tmp -a user:anyuser:rwlc \ + user:adminuser:a,group:anyuser:rwlc,group:adminuser:a tmp/tmp +``` + +在这里,anyuser 是管理员希望授予访问权限的用户名,而 adminuser 是管理员的用户名。 + +然后用户可以访问 tmp 目录: + +```bash +$ ozone fs -put ./NOTICE.txt ofs://om/tmp/key1 +``` + +## 启用回收站的删除操作 + +为了在 Ozone 中启用回收站,请将这些配置添加到 core-site.xml: + +{{< highlight xml >}} + + fs.trash.interval + 10 + + + fs.trash.classname + org.apache.hadoop.ozone.om.TrashPolicyOzone + +{{< /highlight >}} + +当启用回收站功能后删除键时,这些键会被移动到每个桶下的一个回收站目录中,因为在 Ozone 中不允许将键在桶之间移动(重命名)。 + +```bash +$ ozone fs -rm /volume1/bucket1/key1 +2020-06-04 00:00:00,100 [main] INFO fs.TrashPolicyDefault: Moved: 'ofs://id1/volume1/bucket1/key1' to trash at: ofs://id1/volume1/bucket1/.Trash/hadoop/Current/volume1/bucket1/key1 +``` + +这与 HDFS encryption zone 处理回收站位置的方式非常相似。 + +**请注意** + +1. 可以使用标志 `-skipTrash` 来永久删除文件,而不将其移动到回收站。 +2. 启用回收站时,不允许在桶或卷级别进行删除操作。在这种情况下,必须使用 skipTrash。 +即,不使用 skipTrash 的情况下,不允许使用 `ozone fs -rm -R ofs://vol1/bucket1` 或 `ozone fs -rm -R o3fs://bucket1.vol1` 进行操作。 + +## 递归地列出 + +OFS 支持递归地列出卷、桶和键。 + +例如,如果启用了 ACL 的话, 命令 `ozone fs -ls -R ofs://omservice/` 会递归地列出用户有 LIST 权限的所有卷、桶和键。 +如果禁用了 ACL,这个命令会列出该集群上的所有内容。 + +这个功能不会降低服务器性能,因为循环操作是在客户端上进行的。可以将其视为客户端向服务器发出多个请求以获取所有信息的过程。 diff --git a/hadoop-hdds/docs/content/interface/ReconApi.md b/hadoop-hdds/docs/content/interface/ReconApi.md index 1b786efd546a..485c3b0fd42e 100644 --- a/hadoop-hdds/docs/content/interface/ReconApi.md +++ b/hadoop-hdds/docs/content/interface/ReconApi.md @@ -927,6 +927,39 @@ Returns all the datanodes in the cluster. ] } ``` + +### PUT /api/v1/datanodes/remove + +**Parameters** + +* uuids (List of node uuids in JSON array format). + +```json +[ + "50ca4c95-2ef3-4430-b944-97d2442c3daf" +] +``` + +**Returns** + +Returns the list of datanodes which are removed successfully and list of datanodes which were not found. + +```json +{ + "removedNodes": { + "totalCount": 1, + "datanodes": [ + { + "uuid": "50ca4c95-2ef3-4430-b944-97d2442c3daf", + "hostname": "ozone-datanode-4.ozone_default", + "state": "DEAD", + "pipelines": null + } + ], + "message": "Success" + } +} +``` ## Pipelines diff --git a/hadoop-hdds/docs/content/interface/ReconApi.zh.md b/hadoop-hdds/docs/content/interface/ReconApi.zh.md index 7fa4b27b0dee..586fef16b2ff 100644 --- a/hadoop-hdds/docs/content/interface/ReconApi.zh.md +++ b/hadoop-hdds/docs/content/interface/ReconApi.zh.md @@ -222,7 +222,497 @@ Recon API v1 是一组 HTTP 端点,可以帮助您了解 Ozone 集群的当前 回传处于给定状态的容器的 UnhealthyContainerMetadata 对象。 不健康的容器状态可能为`MISSING`, `MIS_REPLICATED`, `UNDER_REPLICATED`,`OVER_REPLICATED`。 响应结构与`/containers/unhealthy`相同。 - + + +### GET /api/v1/containers/mismatch + +**回传** + +回传 OM 和 SCM 之间不匹配容器的列表。 +* 容器存在于 OM 中,但不存在于 SCM 中。 +* 容器存在于 SCM 中,但不存在于 OM 中。 + +```json +[ + { + "containerId" : 1, + "numberOfKeys" : 3, + "pipelines" : [ + "pipelineId" : "1423ghjds832403232", + "pipelineId" : "32vds94943fsdh4443", + "pipelineId" : "32vds94943fsdhs443" + ], + "existsAt" : "OM" + } + ... 
+] +``` + +### GET /api/v1/containers/mismatch/deleted + + +**参数** + +* prevKey (可选) + +返回在SCM中,给定prevKey(容器ID) 后被标记为已删除状态,且在OM中存在的容器集合, +以便找出映射到这些已删除状态容器的键列表。例如:prevKey=5,跳过直到准确地定位到前一个容器ID。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回在SCM中已删除但在OM中存在的容器集合,以找出映射到这些已删除状态容器的键列表。 + +```json +[ + { + "containerId": 2, + "numberOfKeys": 2, + "pipelines": [] + } + ... +] +``` + +### GET /api/v1/keys/open + + +**参数** + +* prevKey (可选) + + 返回给定 prevKey id 之后仍然处于打开状态且存在的键/文件集合。 + 例如:prevKey=/vol1/bucket1/key1,这将跳过键,直到成功定位到给定的 prevKey。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回处于打开状态的键/文件集合。 + +```json +{ + "lastKey": "/vol1/fso-bucket/dir1/dir2/file2", + "replicatedTotal": 13824, + "unreplicatedTotal": 4608, + "entities": [ + { + "path": "/vol1/bucket1/key1", + "keyState": "Open", + "inStateSince": 1667564193026, + "size": 1024, + "replicatedSize": 3072, + "unreplicatedSize": 1024, + "replicationType": "RATIS", + "replicationFactor": "THREE" + }, + { + "path": "/vol1/bucket1/key2", + "keyState": "Open", + "inStateSince": 1667564193026, + "size": 512, + "replicatedSize": 1536, + "unreplicatedSize": 512, + "replicationType": "RATIS", + "replicationFactor": "THREE" + }, + { + "path": "/vol1/fso-bucket/dir1/file1", + "keyState": "Open", + "inStateSince": 1667564193026, + "size": 1024, + "replicatedSize": 3072, + "unreplicatedSize": 1024, + "replicationType": "RATIS", + "replicationFactor": "THREE" + }, + { + "path": "/vol1/fso-bucket/dir1/dir2/file2", + "keyState": "Open", + "inStateSince": 1667564193026, + "size": 2048, + "replicatedSize": 6144, + "unreplicatedSize": 2048, + "replicationType": "RATIS", + "replicationFactor": "THREE" + } + ] +} +``` + +### GET /api/v1/keys/deletePending + + +**参数** + +* prevKey (可选) + + 返回给定 prevKey id 之后处于待删除状态的键/文件集合。 + 例如:prevKey=/vol1/bucket1/key1,这将跳过键,直到成功定位到给定的 prevKey。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回处于待删除状态的键/文件集合。 + +```json +{ + "lastKey": "sampleVol/bucketOne/key_one", + "replicatedTotal": -1530804718628866300, + "unreplicatedTotal": -1530804718628866300, + "deletedkeyinfo": [ + { + "omKeyInfoList": [ + { + "metadata": {}, + "objectID": 0, + "updateID": 0, + "parentObjectID": 0, + "volumeName": "sampleVol", + "bucketName": "bucketOne", + "keyName": "key_one", + "dataSize": -1530804718628866300, + "keyLocationVersions": [], + "creationTime": 0, + "modificationTime": 0, + "replicationConfig": { + "replicationFactor": "ONE", + "requiredNodes": 1, + "replicationType": "STANDALONE" + }, + "fileChecksum": null, + "fileName": "key_one", + "acls": [], + "path": "0/key_one", + "file": false, + "latestVersionLocations": null, + "replicatedSize": -1530804718628866300, + "fileEncryptionInfo": null, + "objectInfo": "OMKeyInfo{volume='sampleVol', bucket='bucketOne', key='key_one', dataSize='-1530804718628866186', creationTime='0', objectID='0', parentID='0', replication='STANDALONE/ONE', fileChecksum='null}", + "updateIDset": false + } + ] + } + ], + "status": "OK" +} +``` + +### GET /api/v1/keys/deletePending/dirs + + +**参数** + +* prevKey (可选) + + 返回给定 prevKey id 之后处于待删除状态的目录集合。 + 例如:prevKey=/vol1/bucket1/bucket1/dir1,这将跳过目录,直到成功定位到给定的 prevKey。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回处于待删除状态的目录集合。 + +```json +{ + "lastKey": "vol1/bucket1/bucket1/dir1", + "replicatedTotal": -1530804718628866300, + "unreplicatedTotal": -1530804718628866300, + "deletedkeyinfo": [ + { + "omKeyInfoList": [ + { + "metadata": {}, + "objectID": 0, + "updateID": 0, + "parentObjectID": 0, + "volumeName": "sampleVol", + 
"bucketName": "bucketOne", + "keyName": "key_one", + "dataSize": -1530804718628866300, + "keyLocationVersions": [], + "creationTime": 0, + "modificationTime": 0, + "replicationConfig": { + "replicationFactor": "ONE", + "requiredNodes": 1, + "replicationType": "STANDALONE" + }, + "fileChecksum": null, + "fileName": "key_one", + "acls": [], + "path": "0/key_one", + "file": false, + "latestVersionLocations": null, + "replicatedSize": -1530804718628866300, + "fileEncryptionInfo": null, + "objectInfo": "OMKeyInfo{volume='sampleVol', bucket='bucketOne', key='key_one', dataSize='-1530804718628866186', creationTime='0', objectID='0', parentID='0', replication='STANDALONE/ONE', fileChecksum='null}", + "updateIDset": false + } + ] + } + ], + "status": "OK" +} +``` + +## Blocks Metadata (admin only) +### GET /api/v1/blocks/deletePending + + +**参数** + +* prevKey (可选) + + 仅返回给定块ID(prevKey)之后处于待删除状态的块列表。 + 例如:prevKey=4,这将跳过 deletedBlocks 表中的键以跳过 prevKey 之前的记录。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回待删除的块列表。 + +```json +{ + "OPEN": [ + { + "containerId": 100, + "localIDList": [ + 1, + 2, + 3, + 4 + ], + "localIDCount": 4, + "txID": 1 + } + ] +} +``` + +## Namespace Metadata (仅 admin) + +### GET /api/v1/namespace/summary + +**参数** + +* path + + 字符串形式的路径请求,不包含任何协议前缀。 + +**回传** + +返回路径的基本信息汇总,包括实体类型和路径下对象的聚合计数。 + +如果路径存在,则 `status` 为 `OK`,否则为 `PATH_NOT_FOUND`。 + +示例: /api/v1/namespace/summary?path=/ +```json + { + "status": OK, + "type": ROOT, + "numVolume": 10, + "numBucket": 100, + "numDir": 1000, + "numKey": 10000 + } +``` + +示例: /api/v1/namespace/summary?path=/volume1 +```json + { + "status": OK, + "type": VOLUME, + "numVolume": -1, + "numBucket": 10, + "numDir": 100, + "numKey": 1000 + } +``` + +示例: /api/v1/namespace/summary?path=/volume1/bucket1 +```json + { + "status": OK, + "type": BUCKET, + "numVolume": -1, + "numBucket": -1, + "numDir": 50, + "numKey": 500 + } +``` + +示例: /api/v1/namespace/summary?path=/volume1/bucket1/dir +```json + { + "status": OK, + "type": DIRECTORY, + "numVolume": -1, + "numBucket": -1, + "numDir": 10, + "numKey": 100 + } +``` + +示例: /api/v1/namespace/summary?path=/volume1/bucket1/dir/nestedDir +```json + { + "status": OK, + "type": DIRECTORY, + "numVolume": -1, + "numBucket": -1, + "numDir": 5, + "numKey": 50 + } +``` + +如果任何 `num` 字段为 `-1`,则该路径请求不适用于该实体类型。 + +### GET /api/v1/namespace/du + +**参数** + +* path + + 字符串形式的路径请求,不包含任何协议前缀。 + +* files (可选) + + 一个布尔值,默认值为 `false`。如果设置为 `true`,则会计算路径下键的磁盘使用情况。 + +* replica (可选) + + 一个布尔值,默认为 `false`。如果设置为 `true`,则会计算键的副本大小的磁盘使用情况。 + +**回传** + +返回路径下所有子路径的磁盘使用情况。规范化 `path` 字段,返回路径下直接健的总大小作为 +`sizeDirectKey`,并以字节为单位返回 `size/sizeWithReplica`。 + +如果路径存在,则 `status` 为 `OK`,否则为 `PATH_NOT_FOUND`。 + +示例: /api/v1/namespace/du?path=/vol1/bucket1&files=true&replica=true +```json + { + "status": OK, + "path": "/vol1/bucket1", + "size": 100000, + "sizeWithReplica": 300000, + "subPathCount": 4, + "subPaths": [ + { + "path": "/vol1/bucket1/dir1-1", + "size": 30000, + "sizeWithReplica": 90000, + "isKey": false + }, + { + "path": "/vol1/bucket1/dir1-2", + "size": 30000, + "sizeWithReplica": 90000, + "isKey": false + }, + { + "path": "/vol1/bucket1/dir1-3", + "size": 30000, + "sizeWithReplica": 90000, + "isKey": false + }, + { + "path": "/vol1/bucket1/key1-1", + "size": 10000, + "sizeWithReplica": 30000, + "isKey": true + } + ], + "sizeDirectKey": 10000 + } +``` +如果 `files` 设置为 `false`,则子路径 `/vol1/bucket1/key1-1` 将被省略。 +如果 `replica` 设置为 `false`,则 `sizeWithReplica` 返回 `-1`。 +如果路径的实体类型无法具有直接键(例如根目录、卷),则 `sizeDirectKey` 返回 `-1`。 + +### 
GET /api/v1/namespace/quota + +**参数** + +* path + + 路径请求为字符串,不包含任何协议前缀。 + +**回传** + +返回路径下允许的配额和已使用的配额。 +只有卷和存储桶具有配额。其他类型不适用于配额请求 + +如果请求有效,则 `status` 为 `OK`;如果路径不存在,则为 `PATH_NOT_FOUND`; +如果路径存在但路径的实体类型不适用于请求,则为 `TYPE_NOT_APPLICABLE`。 + +示例: /api/v1/namespace/quota?path=/vol +```json + { + "status": OK, + "allowed": 200000, + "used": 160000 + } +``` + +如果未设置配额,则 `allowed` 返回 `-1`。详情请参阅 [Ozone 中的配额]。 +(https://ci-hadoop.apache.org/view/Hadoop%20Ozone/job/ozone-doc-master/lastSuccessfulBuild/artifact/hadoop-hdds/docs/public/feature/quota.html) + + +### GET /api/v1/namespace/dist + +**参数** + +* path + + 路径请求为字符串,不包含任何协议前缀。 + +**回传** + +返回路径下所有键的文件大小分布。 + +如果请求有效,则 `status` 为 `OK`;如果路径不存在,则为 `PATH_NOT_FOUND`; +如果路径存在,但该路径是一个键,键不具有文件大小分布,则为 `TYPE_NOT_APPLICABLE`。 + +示例: /api/v1/namespace/dist?path=/ +```json + { + "status": OK, + "dist": [ + 0, + 0, + 10, + 20, + 0, + 30, + 0, + 100, + ... + ] + } +``` + +Recon跟踪所有大小从`1 KB`到`1 PB`的键。对于小于`1 KB`的键,映射到第一个箱(索引); +对于大于`1 PB`的键,映射到最后一个箱(索引)。 + +`dist` 的每个索引都映射到一个文件大小范围(例如 `1 MB` 到 `2 MB`)。 + ## 集群状态 ### GET /api/v1/clusterState @@ -251,6 +741,114 @@ Recon API v1 是一组 HTTP 端点,可以帮助您了解 Ozone 集群的当前 "keys": 25 } ``` + +## Volumes (仅 admin) + +### GET /api/v1/volumes + +**参数** + +* prevKey (可选) + + 仅返回给定 prevKey 之后的卷。 + 示例: prevKey=vol1 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回集群中的所有卷。 + +```json + { + "totalCount": 4, + "volumes": [{ + "volume": "vol1", + "owner": "testuser", + "admin": "ozone", + "creationTime": 1665588176660 , + "modificationTime": 1665590397315, + "quotaInNamespace": 2048, + "quotaInBytes": 1073741824, + "usedNamespace": 10, + "acls": [ + { + "type": "USER", + "name": "testuser", + "scope": "ACCESS", + "aclList": [ + "WRITE", + "READ", + "DELETE" + ] + } + ] + }, + ... + ] + } +``` + +## Buckets (仅 admin) + +### GET /api/v1/buckets + +**参数** + +* volume (可选) + + 卷以字符串形式表示,不包含任何协议前缀。 + +* prevKey (可选) + + 返回给定 prevKey 之后的存储桶。 如果未指定卷,则忽略 prevKey。 + 示例: prevKey=bucket1 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + + +**回传** + +如果未指定卷或指定的卷是一个空字符串,则返回集群中的所有存储桶。 +如果指定了 `volume`,则仅返回 `volume` 下的存储桶。 + +```json + { + "totalCount": 5, + "buckets": [{ + "volumeName": "vol1", + "bucketName": "buck1", + "versioning": false, + "storageType": "DISK", + "creationTime": 1665588176616, + "modificationTime": 1665590392293, + "usedBytes": 943718400, + "usedNamespace": 40000, + "quotaInBytes": 1073741824, + "quotaInNamespace": 50000, + "owner": "testuser", + "bucketLayout": "OBJECT_STORE", + "acls": [ + { + "type": "USER", + "name": "testuser", + "scope": "ACCESS", + "aclList": [ + "WRITE", + "READ", + "DELETE" + ] + } + ] + }, + ... + ] + } +``` ## 数据节点 diff --git a/hadoop-hdds/docs/content/interface/_index.zh.md b/hadoop-hdds/docs/content/interface/_index.zh.md index fd435aad5dce..82c5e1fb9c97 100644 --- a/hadoop-hdds/docs/content/interface/_index.zh.md +++ b/hadoop-hdds/docs/content/interface/_index.zh.md @@ -1,5 +1,5 @@ --- -title: "编程接口" +title: "客户端接口" menu: main: weight: 5 diff --git a/hadoop-hdds/docs/content/recipe/BotoClient.zh.md b/hadoop-hdds/docs/content/recipe/BotoClient.zh.md new file mode 100644 index 000000000000..64a1d8748a64 --- /dev/null +++ b/hadoop-hdds/docs/content/recipe/BotoClient.zh.md @@ -0,0 +1,188 @@ +--- +title: 使用 Boto3 客户端访问 Ozone 对象存储 +linktitle: Boto3 +summary: 如何使用 Boto3 客户端访问 Ozone 对象存储? 
+--- + + +这个指南展示了如何从 Boto3 客户端访问 Ozone 对象存储。以下 API 已经过验证: + +- Create bucket +- List bucket +- Head bucket +- Delete bucket +- Upload file +- Download file +- Delete objects(keys) +- Head object +- Multipart upload + + +## 要求 + +您将需要较高版本的 Python3 来运行 Boto3 客户端,请参考 Boto3 的安装需求: + +https://boto3.amazonaws.com/v1/documentation/api/latest/index.html + +## 获取对 Ozone 的资源访问 +您可以参考 Amazon Boto3 文档,关于创建 `s3` 资源的内容在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html + + s3 = boto3.resource('s3', + endpoint_url='http://localhost:9878', + aws_access_key_id='testuser/scm@EXAMPLE.COM', + aws_secret_access_key='c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999' + ) + 'endpoint_url' is pointing to Ozone s3 endpoint. + + +## 通过 session 获取对 Ozone 的客户端访问 +您可以参考 Amazon Boto3 文档,关于 session 的内容在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html + + Create a session + session = boto3.session.Session() + + Obtain s3 client to Ozone via session: + + s3_client = session.client( + service_name='s3', + aws_access_key_id='testuser/scm@EXAMPLE.COM', + aws_secret_access_key='c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999', + endpoint_url='http://localhost:9878', + ) + 'endpoint_url' is pointing to Ozone s3 endpoint. + + In our code sample below, we're demonstrating the usage of both s3 and s3_client. + +如果您连接到一个安全的集群,有多种方式配置 Boto3 客户端凭证。在这些情况下,创建 Ozone s3 客户端时传递 `aws_access_key_id` 和 `aws_secret_access_key` 的上述步骤应该被跳过。 + +请参考 Boto3 文档以获取详细信息,在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html + + +### 创建桶 + response = s3_client.create_bucket(Bucket='bucket1') + print(response) + +这将在一个名为 `s3v` 的卷中创建一个名为 `bucket1` 的桶 + +### 列出所有桶 + response = s3_client.list_buckets() + print('Existing buckets:') + for bucket in response['Buckets']: + print(f' {bucket["Name"]}') + +这将列出 Ozone `s3v` 卷中的所有桶 + +### 查看桶信息 + response = s3_client.head_bucket(Bucket='bucket1') + print(response) + +这将在 Ozone 卷 `s3v` 中查看桶 `bucket1` 的信息。 + +### 删除桶 + response = s3_client.delete_bucket(Bucket='bucket1') + print(response) + +这将从 Ozone 卷 `s3v` 中删除一个桶 `bucket1`。 + +### 上传文件 + response = s3.Bucket('bucket1').upload_file('./README.md','README.md') + print(response) + +这将从向 Ozone 卷 `s3v` 和桶 `bucket1` 中上传 `README.md` 文件并创建一个 `README.md` 键。 + +### 下载文件 + response = s3.Bucket('bucket1').download_file('README.md', 'download.md') + print(response) + +这将从从 Ozone 卷 `s3v` 和桶 `bucket1` 中下载 `README.md` 并创建一个 `README.md` 文件到本地。 + +### 查看对象信息 + response = s3_client.head_object(Bucket='bucket1', Key='README.md') + print(response) + +这将查看一个位于 Ozone 卷 `s3v` 和桶 `bucket1` 中的 `README.md` 文件的信息。 + +### 删除多个对象 + response = s3_client.delete_objects( + Bucket='bucket1', + Delete={ + 'Objects': [ + { + 'Key': 'README4.md', + }, + { + 'Key': 'README3.md', + }, + ], + 'Quiet': False, + }, + ) + +这将从 Ozone 卷 `s3v` 和桶 `bucket1` 中删除多个对象 `README3.md` 和 `README4.md` + +### 分片上传 + response = s3_client.create_multipart_upload(Bucket='bucket1', Key='key1') + print(response) + uid=response['UploadId'] + print(uid) + + response = s3_client.upload_part_copy( + Bucket='bucket1', + CopySource='/bucket1/maven.gz', + Key='key1', + PartNumber=1, + UploadId=str(uid) + ) + print(response) + etag1=response.get('CopyPartResult').get('ETag') + print(etag1) + + response = s3_client.upload_part_copy( + Bucket='bucket1', + CopySource='/bucket1/maven1.gz', + Key='key1', + PartNumber=2, + UploadId=str(uid) + ) + print(response) + 
etag2=response.get('CopyPartResult').get('ETag') + print(etag2) + + response = s3_client.complete_multipart_upload( + Bucket='bucket1', + Key='key1', + MultipartUpload={ + 'Parts': [ + { + 'ETag': str(etag1), + 'PartNumber': 1, + }, + { + 'ETag': str(etag2), + 'PartNumber': 2, + }, + ], + }, + UploadId=str(uid), + ) + print(response) + +这将使用来自 Ozone 卷 `s3v` 的 `maven.gz` 和 `maven1.gz` 作为复制源,以创建 Ozone 卷 `s3v` 中的新对象 `key1`。请注意,`ETag` 是必需的且对于使用分片上传 API 非常重要。 diff --git a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md new file mode 100644 index 000000000000..07b3f6164f6f --- /dev/null +++ b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md @@ -0,0 +1,130 @@ +--- +title: "安全化 HTTP" +date: "2020-June-17" +summary: 安全化 Ozone 服务的 HTTP 网络控制台 +weight: 4 +menu: + main: + parent: 安全 +icon: lock +--- + + +本文档介绍了如何配置 Ozone HTTP Web 控制台以要求用户身份验证。 + +### 默认身份验证 + +默认情况下 Ozone HTTP Web 控制台 (OM、SCM、S3G、Recon、Datanode) 根据以下默认配置允许无需身份验证的访问。 + +参数 | 值 +-----------------------------------|----------------------------------------- +ozone.security.http.kerberos.enabled | false +ozone.http.filter.initializers | + +如果您有一个启用了 SPNEGO 的 Ozone 集群,并且想要为所有 Ozone 服务禁用它,只需确保按上述两个参数配置即可。 + +### 基于 Kerberos 的 SPNEGO 身份验证 + +身份验证也可以配置为要求使用 HTTP SPNEGO 协议(被 Firefox 和 Chrome 等浏览器所支持)。为了实现这一点,必须先配置以下参数。 + +参数 | 值 +-----------------------------------|----------------------------------------- +hadoop.security.authentication | kerberos +ozone.security.http.kerberos.enabled | true +ozone.http.filter.initializers | org.apache.hadoop.security.AuthenticationFilterInitializer + +之后,各个组件需要正确配置才能完全启用 SPNEGO 或 SIMPLE 身份验证。 + +### 为 OM HTTP 启用 SPNEGO 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +ozone.om.http.auth.type | kerberos +ozone.om.http.auth.kerberos.principal | HTTP/_HOST@REALM +ozone.om.http.auth.kerberos.keytab| /path/to/HTTP.keytab + +### 为 S3G HTTP 启用 SPNEGO 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +ozone.s3g.http.auth.type | kerberos +ozone.s3g.http.auth.kerberos.principal | HTTP/_HOST@REALM +ozone.s3g.http.auth.kerberos.keytab| /path/to/HTTP.keytab + +### 为 RECON HTTP 启用 SPNEGO 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +ozone.recon.http.auth.type | kerberos +ozone.recon.http.auth.kerberos.principal | HTTP/_HOST@REALM +ozone.recon.http.auth.kerberos.keytab| /path/to/HTTP.keytab + +### 为 SCM HTTP 启用 SPNEGO 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +hdds.scm.http.auth.type | kerberos +hdds.scm.http.auth.kerberos.principal | HTTP/_HOST@REALM +hdds.scm.http.auth.kerberos.keytab| /path/to/HTTP.keytab + +### 为 DATANODE HTTP 启用 SPNEGO 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +hdds.datanode.http.auth.type | kerberos +hdds.datanode.http.auth.kerberos.principal | HTTP/_HOST@REALM +hdds.datanode.http.auth.kerberos.keytab| /path/to/HTTP.keytab + +注意: Ozone datanode 没有默认网页,这会阻止您访问“/”或“/index.html”。但它通过 HTTP 提供了标准 Java Servlet,如 jmx/conf/jstack。 + +此外,Ozone HTTP Web 控制台支持相当于 Hadoop 的 Pseudo/Simple 身份验证。 如果启用此选项,则必须在第一次与浏览器交互中使用 user.name 指定用户名查询字符串参数。例如,http://scm:9876/?user.name=scmadmin。 + +### 为 OM HTTP 启用 SIMPLE 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +ozone.om.http.auth.type | simple +ozone.om.http.auth.simple.anonymous.allowed | false + +如果您不想在查询字符串参数中指定 
user.name,更改 ozone.om.http.auth.simple.anonymous.allowed 为 true。 + +### 为 S3G HTTP 启用 SIMPLE 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +ozone.s3g.http.auth.type | simple +ozone.s3g.http.auth.simple.anonymous.allowed | false + +如果您不想在查询字符串参数中指定 user.name,更改 ozone.s3g.http.auth.simple.anonymous.allowed 为 true。 + +### 为 RECON HTTP 启用 SIMPLE 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +ozone.recon.http.auth.type | simple +ozone.recon.http.auth.simple.anonymous.allowed | false + +如果您不想在查询字符串参数中指定 user.name,更改 ozone.recon.http.auth.simple.anonymous.allowed 为 true。 + +### 为 SCM HTTP 启用 SIMPLE 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +hdds.scm.http.auth.type | simple +hdds.scm.http.auth.simple.anonymous.allowed | false + +如果您不想在查询字符串参数中指定 user.name,更改 hdds.scm.http.auth.simple.anonymous.allowed 为 true。 + +### 为 DATANODE HTTP 启用 SIMPLE 身份验证 +参数 | 值 +-----------------------------------|----------------------------------------- +hdds.datanode.http.auth.type | simple +hdds.datanode.http.auth.simple.anonymous.allowed | false + +如果您不想在查询字符串参数中指定 user.name,更改 hdds.datanode.http.auth.simple.anonymous.allowed 为 true。 diff --git a/hadoop-hdds/docs/content/tools/Admin.zh.md b/hadoop-hdds/docs/content/tools/Admin.zh.md new file mode 100644 index 000000000000..e95b76e444b3 --- /dev/null +++ b/hadoop-hdds/docs/content/tools/Admin.zh.md @@ -0,0 +1,35 @@ +--- +title: "Ozone Admin" +date: 2020-03-25 +summary: Ozone Admin 命令可以用于所有与管理有关的任务。 +--- + + +Ozone 管理命令(`ozone admin`)是一套专为管理员使用的工具集合。 + +关于可用功能的简要概述: + + * `ozone admin safemode`: 您可以检查安全模式状态并强制退出/进入安全模式,`--verbose` 选项将打印评估安全模式状态的所有规则的验证状态。 + * `ozone admin container`: 容器是复制的单元。子命令可帮助调试当前容器的状态(list/get/create/...)。 + * `ozone admin pipeline`: 可帮助检查可用的管道(datanode 集合)。 + * `ozone admin datanode`: 提供有关 datanode 的信息。 + * `ozone admin printTopology`: 显示与机架感知相关的信息。 + * `ozone admin replicationmanager`: 可用于检查复制的状态(并在紧急情况下启动/停止复制)。 + * `ozone admin om`: 用于获取有关当前集群的信息的 Ozone Manager HA 相关工具。 + +如需更详细的使用说明,请查看 `--help` 的输出。 diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index 6281fd749591..94f60ea4aa0c 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -51,30 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.rat - apache-rat-plugin - - - static/slides/* - themes/ozonedoc/static/js/bootstrap.min.js - themes/ozonedoc/static/js/jquery-3.5.1.min.js - themes/ozonedoc/static/js/swagger-ui-bundle.js - themes/ozonedoc/static/css/bootstrap-theme.min.css - - themes/ozonedoc/static/css/bootstrap.min.css.map - themes/ozonedoc/static/css/bootstrap.min.css - themes/ozonedoc/static/css/bootstrap-theme.min.css.map - - themes/ozonedoc/static/css/swagger-ui.css - - themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg - - themes/ozonedoc/layouts/index.html - themes/ozonedoc/theme.toml - - - diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml index 9ff328776657..c0272a3d76d3 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml +++ b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml @@ -530,6 +530,21 @@ paths: application/json: schema: $ref: '#/components/responses/DatanodesSummary' + + /datanodes/remove: + get: + tags: + - Datanodes + summary: Removes datanodes from Recon's memory and nodes table in Recon DB. 
+ operationId: removeDatanodes + responses: + '200': + description: Successful Operation + content: + application/json: + schema: + $ref: '#/components/responses/RemovedDatanodesResponse' + /pipelines: get: tags: @@ -1514,6 +1529,8 @@ components: leaderCount: type: integer example: 1 + RemovedDatanodesResponse: + type: object PipelinesSummary: type: object properties: @@ -1659,4 +1676,4 @@ components: type: array example: - 1599159384.455 - - "5" \ No newline at end of file + - "5" diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 5ead355066d0..af0887ad16bf 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -83,8 +83,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> log4j-core + com.lmax disruptor + runtime org.eclipse.jetty diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java index ae74bc048af0..ae5214024808 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java @@ -28,13 +28,12 @@ import java.util.Map; import java.util.Properties; -import com.google.common.base.Strings; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.server.http.HttpServer2; import com.google.common.annotations.VisibleForTesting; -import com.google.gson.Gson; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -145,16 +144,15 @@ public BadFormatException(String msg) { private void processConfigTagRequest(HttpServletRequest request, String cmd, Writer out) throws IOException { - Gson gson = new Gson(); OzoneConfiguration config = getOzoneConfig(); switch (cmd) { case "getOzoneTags": - out.write(gson.toJson(OzoneConfiguration.TAGS)); + out.write(JsonUtils.toJsonString(OzoneConfiguration.TAGS)); break; case "getPropertyByTag": String tags = request.getParameter("tags"); - if (Strings.isNullOrEmpty(tags)) { + if (tags == null || tags.isEmpty()) { throw new IllegalArgumentException("The tags parameter should be set" + " when using the getPropertyByTag command."); } @@ -170,7 +168,7 @@ private void processConfigTagRequest(HttpServletRequest request, String cmd, } } } - out.write(gson.toJsonTree(propMap).toString()); + out.write(JsonUtils.toJsonString(propMap)); break; default: throw new IllegalArgumentException(cmd + " is not a valid command."); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java index 85acc1431fc8..0ab92cfee02c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetServerNameRequestProto; import 
org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetServerNameResponseProto; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetConfigurationChangeProto; @@ -82,26 +83,45 @@ public class ReconfigureProtocolClientSideTranslatorPB implements private final ReconfigureProtocolPB rpcProxy; - public ReconfigureProtocolClientSideTranslatorPB(InetSocketAddress addr, + public ReconfigureProtocolClientSideTranslatorPB(HddsProtos.NodeType nodeType, InetSocketAddress addr, UserGroupInformation ugi, OzoneConfiguration conf) throws IOException { - rpcProxy = createReconfigureProtocolProxy(addr, ugi, conf); + rpcProxy = createReconfigureProtocolProxy(nodeType, addr, ugi, conf); } - static ReconfigureProtocolPB createReconfigureProtocolProxy( + static ReconfigureProtocolPB createReconfigureProtocolProxy(HddsProtos.NodeType nodeType, InetSocketAddress addr, UserGroupInformation ugi, OzoneConfiguration conf) throws IOException { - - RPC.setProtocolEngine(OzoneConfiguration.of(conf), - ReconfigureProtocolPB.class, ProtobufRpcEngine.class); Configuration hadoopConf = LegacyHadoopConfigurationSource .asHadoopConfiguration(conf); - return RPC.getProtocolProxy( - ReconfigureProtocolPB.class, - RPC.getProtocolVersion(ReconfigureProtocolPB.class), - addr, ugi, hadoopConf, - NetUtils.getDefaultSocketFactory(hadoopConf)) - .getProxy(); + if (nodeType == HddsProtos.NodeType.OM) { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolOmPB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolOmPB.class, + RPC.getProtocolVersion(ReconfigureProtocolOmPB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } else if (nodeType == HddsProtos.NodeType.DATANODE) { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolDatanodePB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolDatanodePB.class, + RPC.getProtocolVersion(ReconfigureProtocolDatanodePB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } else { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolPB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolPB.class, + RPC.getProtocolVersion(ReconfigureProtocolPB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } } @Override diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.h b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java similarity index 52% rename from hadoop-hdds/rocks-native/src/main/native/Pipe.h rename to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java index aa75c6311cbc..49e95b9c26f2 100644 --- a/hadoop-hdds/rocks-native/src/main/native/Pipe.h +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,41 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +package org.apache.hadoop.hdds.protocolPB; -#ifndef ROCKS_NATIVE_PIPE_H -#define ROCKS_NATIVE_PIPE_H +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; -#include - -class Pipe { - public: - static const int READ_FILE_DESCRIPTOR_IDX; - static const int WRITE_FILE_DESCRIPTOR_IDX; - Pipe(); - ~Pipe(); - void close(); - int getReadFd() { - return getPipeFileDescriptorIndex(READ_FILE_DESCRIPTOR_IDX); - } - - int getWriteFd() { - return getPipeFileDescriptorIndex(WRITE_FILE_DESCRIPTOR_IDX); - } - - int getPipeFileDescriptorIndex(int idx) { - return p[idx]; - } - - bool isOpen() { - return open; - } - - - private: - int p[2]; - FILE* wr; - bool open; - -}; - -#endif //ROCKS_NATIVE_PIPE_H +/** + * Protocol that clients use to communicate with the DN to do + * reconfiguration on the fly. + */ +@ProtocolInfo( + protocolName = "org.apache.hadoop.hdds.protocol.ReconfigureProtocol", + protocolVersion = 1) +@KerberosInfo(serverPrincipal = DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY) +public interface ReconfigureProtocolDatanodePB extends ReconfigureProtocolPB { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java new file mode 100644 index 000000000000..2775e71efa74 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.protocolPB; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; + +/** + * Protocol that clients use to communicate with the OM to do + * reconfiguration on the fly. + */ +@ProtocolInfo( + protocolName = "org.apache.hadoop.hdds.protocol.ReconfigureProtocol", + protocolVersion = 1) +// TODO: move OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY to hdds-common. 
+@KerberosInfo(serverPrincipal = "ozone.om.kerberos.principal") +public interface ReconfigureProtocolOmPB extends ReconfigureProtocolPB { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java index e1702ce0ada8..cb31a366ad7d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java @@ -23,7 +23,7 @@ import org.apache.hadoop.security.KerberosInfo; /** - * Protocol that clients use to communicate with the OM/SCM to do + * Protocol that clients use to communicate with the SCM to do * reconfiguration on the fly. */ @ProtocolInfo( diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java index 8db07cbc80f3..7a6a5a904244 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java @@ -45,7 +45,7 @@ * ReconfigureProtocol. */ public class ReconfigureProtocolServerSideTranslatorPB implements - ReconfigureProtocolPB { + ReconfigureProtocolPB, ReconfigureProtocolOmPB, ReconfigureProtocolDatanodePB { private final ReconfigureProtocol impl; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java new file mode 100644 index 000000000000..5e33eefde6c5 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.client; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT; + +/** + * This client implements a background thread which periodically checks and + * gets the latest network topology cluster tree from SCM. + */ +public class ScmTopologyClient { + private static final Logger LOG = + LoggerFactory.getLogger(ScmTopologyClient.class); + + private final ScmBlockLocationProtocol scmBlockLocationProtocol; + private final AtomicReference cache = + new AtomicReference<>(); + private ScheduledExecutorService executorService; + + public ScmTopologyClient( + ScmBlockLocationProtocol scmBlockLocationProtocol) { + this.scmBlockLocationProtocol = scmBlockLocationProtocol; + } + + public NetworkTopology getClusterMap() { + return requireNonNull(cache.get(), + "ScmBlockLocationClient must have been initialized already."); + } + + public void start(ConfigurationSource conf) throws IOException { + final InnerNode initialTopology = + scmBlockLocationProtocol.getNetworkTopology(); + LOG.info("Initial network topology fetched from SCM: {}.", + initialTopology); + cache.set(new NetworkTopologyImpl(conf.get( + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), + initialTopology)); + scheduleNetworkTopologyPoller(conf, Instant.now()); + } + + public void stop() { + if (executorService != null) { + executorService.shutdown(); + try { + if (executorService.awaitTermination(5, TimeUnit.SECONDS)) { + executorService.shutdownNow(); + } + } catch (InterruptedException e) { + LOG.error("Interrupted while shutting down executor service.", e); + Thread.currentThread().interrupt(); + } + } + } + + private void scheduleNetworkTopologyPoller(ConfigurationSource conf, + Instant initialInvocation) { + Duration refreshDuration = parseRefreshDuration(conf); + Instant nextRefresh = initialInvocation.plus(refreshDuration); + ThreadFactory threadFactory = new ThreadFactoryBuilder() + .setNameFormat("NetworkTopologyPoller") + .setDaemon(true) + .build(); + executorService = Executors.newScheduledThreadPool(1, threadFactory); + Duration 
initialDelay = Duration.between(Instant.now(), nextRefresh); + + LOG.debug("Scheduling NetworkTopologyPoller with an initial delay of {}.", + initialDelay); + executorService.scheduleAtFixedRate(() -> checkAndRefresh(conf), + initialDelay.toMillis(), refreshDuration.toMillis(), + TimeUnit.MILLISECONDS); + } + + public static Duration parseRefreshDuration(ConfigurationSource conf) { + long refreshDurationInMs = conf.getTimeDuration( + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION, + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT, + TimeUnit.MILLISECONDS); + return Duration.ofMillis(refreshDurationInMs); + } + + private synchronized void checkAndRefresh(ConfigurationSource conf) { + InnerNode current = (InnerNode) cache.get().getNode(ROOT); + try { + InnerNode newTopology = scmBlockLocationProtocol.getNetworkTopology(); + if (!newTopology.equals(current)) { + cache.set(new NetworkTopologyImpl(conf.get( + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), + newTopology)); + LOG.info("Updated network topology fetched from SCM: {}.", newTopology); + } + } catch (IOException e) { + throw new UncheckedIOException( + "Error fetching updated network topology from SCM", e); + } + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java new file mode 100644 index 000000000000..8dc9cb3cca2f --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *

+ */ + +/** + * Contains SCM client related classes. + */ +package org.apache.hadoop.hdds.scm.client; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeletedBlocksTransactionInfoWrapper.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeletedBlocksTransactionInfoWrapper.java index 64ced8dce4e2..be5c9a03e089 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeletedBlocksTransactionInfoWrapper.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeletedBlocksTransactionInfoWrapper.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdds.scm.container.common.helpers; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import java.util.List; @@ -31,8 +33,11 @@ public class DeletedBlocksTransactionInfoWrapper { private final List<Long> localIdList; private final int count; - public DeletedBlocksTransactionInfoWrapper(long txID, long containerID, - List<Long> localIdList, int count) { + @JsonCreator + public DeletedBlocksTransactionInfoWrapper(@JsonProperty("txID") long txID, + @JsonProperty("containerID") long containerID, + @JsonProperty("localIdList") List<Long> localIdList, + @JsonProperty("count") int count) { this.txID = txID; this.containerID = containerID; this.localIdList = localIdList; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java index ef2585488faa..8c84af859b4a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; @@ -138,4 +139,11 @@ List<AllocatedBlock> allocateBlock(long size, int numBlocks, */ List<DatanodeDetails> sortDatanodes(List<String> nodes, String clientMachine) throws IOException; + + /** + * Retrieves the hierarchical cluster tree representing the network topology. + * @return the root node of the network topology cluster tree. 
+ * @throws IOException + */ + InnerNode getNetworkTopology() throws IOException; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 2e724969998b..1f114304ccaa 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; @@ -39,6 +40,8 @@ import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .SortDatanodesRequestProto; @@ -49,6 +52,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.Node; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; @@ -328,6 +334,43 @@ public List sortDatanodes(List nodes, return results; } + @Override + public InnerNode getNetworkTopology() throws IOException { + GetClusterTreeRequestProto request = + GetClusterTreeRequestProto.newBuilder().build(); + SCMBlockLocationRequest wrapper = createSCMBlockRequest(Type.GetClusterTree) + .setGetClusterTreeRequest(request) + .build(); + + final SCMBlockLocationResponse wrappedResponse = + handleError(submitRequest(wrapper)); + GetClusterTreeResponseProto resp = + wrappedResponse.getGetClusterTreeResponse(); + + return (InnerNode) setParent( + InnerNodeImpl.fromProtobuf(resp.getClusterTree())); + } + + /** + * Sets the parent field for the clusterTree nodes recursively. + * + * @param node cluster tree without parents set. + * @return updated cluster tree with parents set. 
+ */ + private Node setParent(Node node) { + if (node instanceof InnerNodeImpl) { + InnerNodeImpl innerNode = (InnerNodeImpl) node; + if (innerNode.getChildrenMap() != null) { + for (Map.Entry child : innerNode.getChildrenMap() + .entrySet()) { + child.getValue().setParent(innerNode); + setParent(child.getValue()); + } + } + } + return node; + } + @Override public Object getUnderlyingProxyObject() { return rpcProxy; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 84a0fa4886ce..3570257b5855 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -68,6 +68,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto; @@ -526,15 +528,16 @@ public HddsProtos.Node queryNode(UUID uuid) throws IOException { /** * Attempts to decommission the list of nodes. 
* @param nodes The list of hostnames or hostname:ports to decommission + * @param force true to skip fail-early checks and try to decommission nodes * @throws IOException */ @Override - public List decommissionNodes(List nodes) + public List decommissionNodes(List nodes, boolean force) throws IOException { Preconditions.checkNotNull(nodes); DecommissionNodesRequestProto request = DecommissionNodesRequestProto.newBuilder() - .addAllHosts(nodes) + .addAllHosts(nodes).setForce(force) .build(); DecommissionNodesResponseProto response = submitRequest(Type.DecommissionNodes, @@ -583,12 +586,13 @@ public List recommissionNodes(List nodes) */ @Override public List startMaintenanceNodes( - List nodes, int endInHours) throws IOException { + List nodes, int endInHours, boolean force) throws IOException { Preconditions.checkNotNull(nodes); StartMaintenanceNodesRequestProto request = StartMaintenanceNodesRequestProto.newBuilder() .addAllHosts(nodes) .setEndInHours(endInHours) + .setForce(force) .build(); StartMaintenanceNodesResponseProto response = submitRequest(Type.StartMaintenanceNodes, @@ -900,7 +904,13 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException { + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { StartContainerBalancerRequestProto.Builder builder = StartContainerBalancerRequestProto.newBuilder(); builder.setTraceID(TracingUtil.exportCurrentSpan()); @@ -909,29 +919,29 @@ public StartContainerBalancerResponseProto startContainerBalancer( if (threshold.isPresent()) { double tsd = threshold.get(); Preconditions.checkState(tsd >= 0.0D && tsd < 100D, - "threshold should be specified in range [0.0, 100.0)."); + "Threshold should be specified in the range [0.0, 100.0)."); builder.setThreshold(tsd); } if (maxSizeToMovePerIterationInGB.isPresent()) { long mstm = maxSizeToMovePerIterationInGB.get(); Preconditions.checkState(mstm > 0, - "maxSizeToMovePerIterationInGB must be positive."); + "Max Size To Move Per Iteration In GB must be positive."); builder.setMaxSizeToMovePerIterationInGB(mstm); } if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); Preconditions.checkState(mdti >= 0, - "maxDatanodesPercentageToInvolvePerIteration must be " + + "Max Datanodes Percentage To Involve Per Iteration must be " + "greater than equal to zero."); Preconditions.checkState(mdti <= 100, - "maxDatanodesPercentageToInvolvePerIteration must be " + + "Max Datanodes Percentage To Involve Per Iteration must be " + "lesser than equal to hundred."); builder.setMaxDatanodesPercentageToInvolvePerIteration(mdti); } if (iterations.isPresent()) { int i = iterations.get(); Preconditions.checkState(i > 0 || i == -1, - "number of iterations must be positive or" + + "Number of Iterations must be positive or" + " -1 (for running container balancer infinitely)."); builder.setIterations(i); } @@ -939,17 +949,53 @@ public StartContainerBalancerResponseProto startContainerBalancer( if (maxSizeEnteringTargetInGB.isPresent()) { long mset = maxSizeEnteringTargetInGB.get(); Preconditions.checkState(mset > 0, - "maxSizeEnteringTargetInGB must be positive."); + "Max Size Entering Target In GB 
must be positive."); builder.setMaxSizeEnteringTargetInGB(mset); } if (maxSizeLeavingSourceInGB.isPresent()) { long msls = maxSizeLeavingSourceInGB.get(); Preconditions.checkState(msls > 0, - "maxSizeLeavingSourceInGB must be positive."); + "Max Size Leaving Source In GB must be positive."); builder.setMaxSizeLeavingSourceInGB(msls); } + if (balancingInterval.isPresent()) { + int bi = balancingInterval.get(); + Preconditions.checkState(bi > 0, + "Balancing Interval must be greater than zero."); + builder.setBalancingInterval(bi); + } + + if (moveTimeout.isPresent()) { + int mt = moveTimeout.get(); + Preconditions.checkState(mt > 0, + "Move Timeout must be greater than zero."); + builder.setMoveTimeout(mt); + } + + if (moveReplicationTimeout.isPresent()) { + int mrt = moveReplicationTimeout.get(); + Preconditions.checkState(mrt > 0, + "Move Replication Timeout must be greater than zero."); + builder.setMoveReplicationTimeout(mrt); + } + + if (networkTopologyEnable.isPresent()) { + Boolean nt = networkTopologyEnable.get(); + builder.setNetworkTopologyEnable(nt); + } + + if (includeNodes.isPresent()) { + String in = includeNodes.get(); + builder.setIncludeNodes(in); + } + + if (excludeNodes.isPresent()) { + String ex = excludeNodes.get(); + builder.setExcludeNodes(ex); + } + StartContainerBalancerRequestProto request = builder.build(); return submitRequest(Type.StartContainerBalancer, builder1 -> builder1.setStartContainerBalancerRequest(request)) @@ -1143,4 +1189,13 @@ public DecommissionScmResponseProto decommissionScm( .getDecommissionScmResponse(); return response; } + + @Override + public String getMetrics(String query) throws IOException { + GetMetricsRequestProto request = GetMetricsRequestProto.newBuilder().setQuery(query).build(); + GetMetricsResponseProto response = submitRequest(Type.GetMetrics, + builder -> builder.setGetMetricsRequest(request)).getGetMetricsResponse(); + String metricsJsonStr = response.getMetricsJson(); + return metricsJsonStr; + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/CompositeTokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/CompositeTokenVerifier.java index 750bb7613f91..75059d2e7180 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/CompositeTokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/CompositeTokenVerifier.java @@ -36,11 +36,11 @@ public CompositeTokenVerifier(List delegates) { } @Override - public void verify(String user, Token token, + public void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) throws SCMSecurityException { for (TokenVerifier verifier : delegates) { - verifier.verify(user, token, cmd); + verifier.verify(token, cmd); } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/NoopTokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/NoopTokenVerifier.java index 084b42efe90b..0095ed653bfc 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/NoopTokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/NoopTokenVerifier.java @@ -24,13 +24,13 @@ public class NoopTokenVerifier implements TokenVerifier { @Override - public void verify(String user, Token token, + public void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) { // no-op } @Override // to avoid "failed to find token" - public 
void verify(ContainerCommandRequestProtoOrBuilder cmd, String user, + public void verify(ContainerCommandRequestProtoOrBuilder cmd, String encodedToken) { // no-op } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java index b03c9de5f42e..1d7438cf89c1 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import java.io.IOException; @@ -64,7 +63,7 @@ protected void verify(T tokenId, ContainerCommandRequestProtoOrBuilder cmd) } @Override - public void verify(String user, Token token, + public void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) throws SCMSecurityException { if (!isTokenRequired(cmd.getCmdType())) { @@ -80,10 +79,9 @@ public void verify(String user, Token token, verifyTokenPassword(tokenId, token.getPassword()); - UserGroupInformation tokenUser = tokenId.getUser(); // check expiration if (tokenId.isExpired(Instant.now())) { - throw new BlockTokenException("Expired token for user: " + tokenUser); + throw new BlockTokenException("Expired token for user: " + tokenId.getUser()); } // check token service (blockID or containerID) @@ -91,7 +89,7 @@ public void verify(String user, Token token, if (!Objects.equals(service, tokenId.getService())) { throw new BlockTokenException("ID mismatch. Token for ID: " + tokenId.getService() + " can't be used to access: " + service + - " by user: " + tokenUser); + " by user: " + tokenId.getUser()); } verify(tokenId, cmd); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java index 4d06cbf15fe9..e7e029f70877 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java @@ -42,18 +42,17 @@ public interface TokenVerifier { * Verify if {@code token} is valid to allow execution of {@code cmd} for * {@code user}. * - * @param user user of the request * @param token the token to verify * @param cmd container command * @throws SCMSecurityException if token verification fails. */ - void verify(String user, Token token, + void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) throws SCMSecurityException; - /** Same as {@link #verify(String, Token, + /** Same as {@link #verify(Token, * ContainerCommandRequestProtoOrBuilder)}, but with encoded token. 
*/ - default void verify(ContainerCommandRequestProtoOrBuilder cmd, String user, + default void verify(ContainerCommandRequestProtoOrBuilder cmd, String encodedToken) throws SCMSecurityException { if (Strings.isNullOrEmpty(encodedToken)) { @@ -68,7 +67,7 @@ default void verify(ContainerCommandRequestProtoOrBuilder cmd, String user, throw new BlockTokenException("Failed to decode token : " + encodedToken); } - verify(user, token, cmd); + verify(token, cmd); } /** Create appropriate token verifier based on the configuration. */ diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java index dc217476a60c..047386730818 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java @@ -21,8 +21,6 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableRate; -import com.google.common.annotations.VisibleForTesting; - /** * Metrics for any event watcher. */ @@ -56,23 +54,19 @@ public void updateFinishingTime(long duration) { completionTime.add(duration); } - @VisibleForTesting - public MutableCounterLong getTrackedEvents() { + MutableCounterLong getTrackedEvents() { return trackedEvents; } - @VisibleForTesting - public MutableCounterLong getTimedOutEvents() { + MutableCounterLong getTimedOutEvents() { return timedOutEvents; } - @VisibleForTesting - public MutableCounterLong getCompletedEvents() { + MutableCounterLong getCompletedEvents() { return completedEvents; } - @VisibleForTesting - public MutableRate getCompletionTime() { + MutableRate getCompletionTime() { return completionTime; } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java index 5d65634b447b..44c18231549a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java @@ -372,10 +372,12 @@ public static HttpServer2.Builder loadSslConfToHttpServerBuilder( .keyPassword(getPassword(sslConf, OZONE_SERVER_HTTPS_KEYPASSWORD_KEY)) .keyStore(sslConf.get("ssl.server.keystore.location"), getPassword(sslConf, OZONE_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY), - sslConf.get("ssl.server.keystore.type", "jks")) + sslConf.get(HddsConfigKeys.HDDS_HTTP_SERVER_KEYSTORE_TYPE, + HddsConfigKeys.HDDS_HTTP_SERVER_KEYSTORE_TYPE_DEFAULT)) .trustStore(sslConf.get("ssl.server.truststore.location"), getPassword(sslConf, OZONE_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY), - sslConf.get("ssl.server.truststore.type", "jks")) + sslConf.get(HddsConfigKeys.HDDS_HTTP_SERVER_TRUSTSTORE_TYPE, + HddsConfigKeys.HDDS_HTTP_SERVER_TRUSTSTORE_TYPE_DEFAULT)) .excludeCiphers( sslConf.get("ssl.server.exclude.cipher.list")); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/FaultInjector.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/FaultInjector.java index 32076abb3fb3..6be333c4c9a6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/FaultInjector.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/FaultInjector.java @@ -17,6 +17,7 @@ package 
org.apache.hadoop.hdds.utils; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import java.io.IOException; @@ -49,4 +50,13 @@ public void setException(Throwable e) { public Throwable getException() { return null; } + + @VisibleForTesting + public void setType(ContainerProtos.Type type) { + } + + @VisibleForTesting + public ContainerProtos.Type getType() { + return null; + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 70d394e73b31..4fae3686c93c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -382,14 +382,14 @@ public static int getLogWarnInterval(ConfigurationSource conf) { * @return port number. */ public static int getContainerPort(ConfigurationSource conf) { - return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + return conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } public static Collection getOzoneDatanodeRatisDirectory( ConfigurationSource conf) { Collection rawLocations = conf.getTrimmedStringCollection( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); if (rawLocations.isEmpty()) { rawLocations = new ArrayList<>(1); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMetrics.java index 479e88ed3e9c..bec4a6f9bc80 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMetrics.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMetrics.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.utils; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -25,7 +26,6 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.Interns; -import org.bouncycastle.util.Strings; import org.rocksdb.HistogramData; import org.rocksdb.HistogramType; import org.rocksdb.LiveFileMetaData; @@ -257,7 +257,7 @@ private Map>> computeSstFileStat() Map sizeStat; for (LiveFileMetaData file : liveFileMetaDataList) { numStat = numStatPerCF.get(file.level()); - String cf = Strings.fromByteArray(file.columnFamilyName()); + String cf = StringUtils.bytes2String(file.columnFamilyName()); if (numStat != null) { Long value = numStat.get(cf); numStat.put(cf, value == null ? 
1L : value + 1); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index 32fcbfec6e44..31089bc1c0b6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedLogger; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; @@ -405,12 +406,7 @@ private ManagedDBOptions getDefaultDBOptions( // Apply logging settings. if (rocksDBConfiguration.isRocksdbLoggingEnabled()) { - org.rocksdb.Logger logger = new org.rocksdb.Logger(dbOptions) { - @Override - protected void log(InfoLogLevel infoLogLevel, String s) { - ROCKS_DB_LOGGER.info(s); - } - }; + ManagedLogger logger = new ManagedLogger(dbOptions, (infoLogLevel, s) -> ROCKS_DB_LOGGER.info(s)); InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration .getRocksdbLogLevel() + "_LEVEL"); logger.setInfoLogLevel(level); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java index 6760eb47f486..d5aa961b0e9e 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java @@ -163,7 +163,7 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics stati rocksDBCheckpointDiffer.setCompactionLogTableCFHandle( compactionLogTableCF.getHandle()); // Set activeRocksDB in differ to access compaction log CF. 
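For orientation, a sketch of the RocksDB logging wiring that DBStoreBuilder now uses (illustrative only; ManagedDBOptions#setLogger and ManagedLogger are introduced later in this diff, and LOG stands for whatever slf4j logger the caller has):

  try (ManagedDBOptions dbOptions = new ManagedDBOptions()) {
    ManagedLogger logger = new ManagedLogger(dbOptions,
        (infoLogLevel, msg) -> LOG.info(msg));
    logger.setInfoLogLevel(InfoLogLevel.INFO_LEVEL);
    // ManagedDBOptions closes any previously registered logger here and closes the
    // current one when the options themselves are closed, so nothing leaks.
    dbOptions.setLogger(logger);
    // ... open the RocksDB instance with dbOptions ...
  }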
- rocksDBCheckpointDiffer.setActiveRocksDB(db.getManagedRocksDb().get()); + rocksDBCheckpointDiffer.setActiveRocksDB(db.getManagedRocksDb()); // Load all previous compaction logs rocksDBCheckpointDiffer.loadAllCompactionLogs(); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index 19f60d914f32..957e9d27c38c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -65,7 +65,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions.closeDeeply; import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator.managed; import static org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator.managed; @@ -848,8 +847,7 @@ public void deleteFilesNotMatchingPrefix(Map prefixPairs) throws IOException, RocksDBException { try (UncheckedAutoCloseable ignored = acquire()) { for (LiveFileMetaData liveFileMetaData : getSstFileList()) { - String sstFileColumnFamily = - new String(liveFileMetaData.columnFamilyName(), UTF_8); + String sstFileColumnFamily = StringUtils.bytes2String(liveFileMetaData.columnFamilyName()); int lastLevel = getLastLevel(); if (!prefixPairs.containsKey(sstFileColumnFamily)) { @@ -867,8 +865,8 @@ public void deleteFilesNotMatchingPrefix(Map prefixPairs) } String prefixForColumnFamily = prefixPairs.get(sstFileColumnFamily); - String firstDbKey = new String(liveFileMetaData.smallestKey(), UTF_8); - String lastDbKey = new String(liveFileMetaData.largestKey(), UTF_8); + String firstDbKey = StringUtils.bytes2String(liveFileMetaData.smallestKey()); + String lastDbKey = StringUtils.bytes2String(liveFileMetaData.largestKey()); boolean isKeyWithPrefixPresent = RocksDiffUtils.isKeyWithPrefixPresent( prefixForColumnFamily, firstDbKey, lastDbKey); if (!isKeyWithPrefixPresent) { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestHddsConfServlet.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestHddsConfServlet.java index ad95ce1632d9..87038e004c29 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestHddsConfServlet.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestHddsConfServlet.java @@ -27,7 +27,7 @@ import static org.mockito.Mockito.when; import com.google.common.base.Strings; -import com.google.gson.Gson; +import org.apache.hadoop.hdds.JsonTestUtils; import org.apache.hadoop.hdds.server.http.HttpServer2; import org.apache.hadoop.util.XMLUtils; import org.eclipse.jetty.util.ajax.JSON; @@ -109,8 +109,7 @@ public void testGetPropertyWithCmd() throws Exception { conf.getObject(OzoneTestConfig.class); // test cmd is getOzoneTags String result = getResultWithCmd(conf, "getOzoneTags"); - Gson gson = new Gson(); - String tags = gson.toJson(OzoneConfiguration.TAGS); + String tags = JsonTestUtils.toJsonString(OzoneConfiguration.TAGS); assertEquals(result, tags); // cmd is getPropertyByTag result = getResultWithCmd(conf, "getPropertyByTag"); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java index d653c6af7936..36f405fe691a 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java @@ -152,7 +152,7 @@ public void tokenCanBeUsedForSpecificBlock() throws Exception { .build(); // THEN - tokenVerifier.verify("testUser", token, putBlockCommand); + tokenVerifier.verify(token, putBlockCommand); } @Test @@ -172,7 +172,7 @@ public void tokenCannotBeUsedForOtherBlock() throws Exception { // THEN BlockTokenException e = assertThrows(BlockTokenException.class, - () -> tokenVerifier.verify("testUser", token, writeChunkRequest)); + () -> tokenVerifier.verify(token, writeChunkRequest)); assertThat(e.getMessage()).contains("Token for ID: " + OzoneBlockTokenIdentifier.getTokenService(blockID) + @@ -200,12 +200,12 @@ public void testBlockTokenReadAccessMode() throws Exception { pipeline, putBlockCommand.getPutBlock()); BlockTokenException e = assertThrows(BlockTokenException.class, - () -> tokenVerifier.verify(testUser1, token, putBlockCommand)); + () -> tokenVerifier.verify(token, putBlockCommand)); assertThat(e.getMessage()) .contains("doesn't have WRITE permission"); - tokenVerifier.verify(testUser1, token, getBlockCommand); + tokenVerifier.verify(token, getBlockCommand); } @Test @@ -223,10 +223,10 @@ public void testBlockTokenWriteAccessMode() throws Exception { ContainerCommandRequestProto readChunkRequest = getReadChunkRequest(pipeline, writeChunkRequest.getWriteChunk()); - tokenVerifier.verify(testUser2, token, writeChunkRequest); + tokenVerifier.verify(token, writeChunkRequest); BlockTokenException e = assertThrows(BlockTokenException.class, - () -> tokenVerifier.verify(testUser2, token, readChunkRequest)); + () -> tokenVerifier.verify(token, readChunkRequest)); assertThat(e.getMessage()) .contains("doesn't have READ permission"); } @@ -243,14 +243,14 @@ public void testExpiredSecretKey() throws Exception { .setEncodedToken(token.encodeToUrlString()) .build(); - tokenVerifier.verify("testUser", token, writeChunkRequest); + tokenVerifier.verify(token, writeChunkRequest); // Mock client with an expired cert ManagedSecretKey expiredSecretKey = generateExpiredSecretKey(); when(secretKeyClient.getSecretKey(any())).thenReturn(expiredSecretKey); BlockTokenException e = assertThrows(BlockTokenException.class, - () -> tokenVerifier.verify(user, token, writeChunkRequest)); + () -> tokenVerifier.verify(token, writeChunkRequest)); assertThat(e.getMessage()) .contains("Token can't be verified due to expired secret key"); } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java index 6e76f4c12eef..40c09265acb6 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java @@ -90,7 +90,7 @@ public void skipsVerificationIfDisabled() throws IOException { TokenVerifier subject = newTestSubject(tokenDisabled(), secretKeyClient); // WHEN - subject.verify("anyUser", anyToken(), verifiedRequest(newTokenId())); + subject.verify(anyToken(), verifiedRequest(newTokenId())); // THEN verify(secretKeyClient, 
never()).getSecretKey(any()); @@ -104,7 +104,7 @@ public void skipsVerificationForMiscCommands() throws IOException { TokenVerifier subject = newTestSubject(tokenEnabled(), secretKeyClient); // WHEN - subject.verify("anyUser", anyToken(), unverifiedRequest()); + subject.verify(anyToken(), unverifiedRequest()); // THEN verify(secretKeyClient, never()).getSecretKey(any()); @@ -130,7 +130,7 @@ public void rejectsExpiredSecretKey() throws Exception { ShortLivedTokenSecretManager secretManager = new MockTokenManager(); Token token = secretManager.generateToken(tokenId); BlockTokenException ex = assertThrows(BlockTokenException.class, () -> - subject.verify("anyUser", token, cmd)); + subject.verify(token, cmd)); assertThat(ex.getMessage()).contains("expired secret key"); } @@ -149,7 +149,7 @@ public void rejectsTokenWithInvalidSecretId() throws Exception { ShortLivedTokenSecretManager secretManager = new MockTokenManager(); Token token = secretManager.generateToken(tokenId); BlockTokenException ex = assertThrows(BlockTokenException.class, () -> - subject.verify("anyUser", token, cmd)); + subject.verify(token, cmd)); assertThat(ex.getMessage()) .contains("Can't find the signing secret key"); } @@ -169,7 +169,7 @@ public void rejectsInvalidSignature() throws Exception { // WHEN+THEN BlockTokenException ex = assertThrows(BlockTokenException.class, () -> - subject.verify("anyUser", invalidToken, cmd)); + subject.verify(invalidToken, cmd)); assertThat(ex.getMessage()) .contains("Invalid token for user"); } @@ -201,7 +201,7 @@ public void rejectsExpiredToken() throws Exception { // WHEN+THEN BlockTokenException ex = assertThrows(BlockTokenException.class, () -> - subject.verify("anyUser", token, cmd)); + subject.verify(token, cmd)); assertThat(ex.getMessage()) .contains("Expired token for user"); } @@ -219,7 +219,7 @@ public void acceptsValidToken() throws Exception { TokenVerifier subject = newTestSubject(conf, secretKeyClient); // WHEN+THEN - subject.verify("anyUser", token, cmd); + subject.verify(token, cmd); } private T expired(T tokenId) { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestJsonUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestJsonUtils.java index 7dc63a78a953..303cb460f453 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestJsonUtils.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestJsonUtils.java @@ -19,6 +19,7 @@ import java.io.IOException; +import org.apache.hadoop.hdds.JsonTestUtils; import org.apache.hadoop.hdds.client.OzoneQuota; import org.junit.jupiter.api.Test; @@ -35,13 +36,13 @@ public void printObjectAsJson() throws IOException { OzoneQuota spaceQuota = OzoneQuota.parseSpaceQuota("123MB"); String spaceStr = - JsonUtils.toJsonStringWithDefaultPrettyPrinter(spaceQuota); + JsonTestUtils.toJsonStringWithDefaultPrettyPrinter(spaceQuota); assertContains(spaceStr, "\"rawSize\" : 123"); assertContains(spaceStr, "\"unit\" : \"MB\""); OzoneQuota nameSpace = OzoneQuota.parseNameSpaceQuota("1000"); String nameSpaceStr = - JsonUtils.toJsonStringWithDefaultPrettyPrinter(nameSpace); + JsonTestUtils.toJsonStringWithDefaultPrettyPrinter(nameSpace); assertContains(nameSpaceStr, "\"quotaInNamespace\" : 1000"); } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java index 504c3dd47f32..8095c1cbb1f4 100644 --- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java @@ -48,6 +48,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -55,7 +56,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Tests for RocksDBTable Store. @@ -96,11 +96,7 @@ public static void initConstants() { private static boolean consume(Table.KeyValue keyValue) { count++; - try { - assertNotNull(keyValue.getKey()); - } catch (IOException ex) { - fail("Unexpected Exception " + ex); - } + assertNotNull(assertDoesNotThrow(keyValue::getKey)); return true; } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java index 9e16ebb99e19..f437d6518c5f 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java @@ -21,6 +21,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -29,7 +30,6 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.File; @@ -217,11 +217,7 @@ public void batchDelete() throws Exception { private static boolean consume(Table.KeyValue keyValue) { count++; - try { - assertNotNull(keyValue.getKey()); - } catch (IOException ex) { - fail(ex.toString()); - } + assertNotNull(assertDoesNotThrow(keyValue::getKey)); return true; } diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 85ae7bd4b201..5d0ca946aeed 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -43,6 +43,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} + + com.nimbusds + nimbus-jose-jwt + + + org.xerial.snappy + snappy-java + + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + org.apache.hadoop hadoop-annotations @@ -99,6 +111,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.pjfanning jersey-json + + com.google.code.findbugs + jsr305 + com.sun.jersey jersey-core @@ -194,12 +210,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + + + com.nimbusds + nimbus-jose-jwt + org.apache.hadoop hadoop-hdfs 
${hadoop.version} compile + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + com.google.guava guava @@ -286,5 +314,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.xerial.snappy + snappy-java + diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index abee8cc400fb..28d098edba29 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -43,6 +43,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} + + com.nimbusds + nimbus-jose-jwt + + + org.xerial.snappy + snappy-java + + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + org.apache.curator * @@ -79,6 +91,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.pjfanning jersey-json + + com.google.code.findbugs + jsr305 + com.sun.jersey jersey-json @@ -101,6 +117,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + + + com.nimbusds + nimbus-jose-jwt + commons-cli @@ -112,6 +136,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${hadoop.version} compile + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + io.netty * @@ -134,5 +162,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.kerby + kerb-core + + + org.apache.kerby + kerb-util + + + org.xerial.snappy + snappy-java + diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index d194670acc36..c7b2776094c4 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -76,6 +76,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.junit.jupiter junit-jupiter-api + + + org.junit.jupiter + junit-jupiter-engine + org.junit.jupiter junit-jupiter-params @@ -85,6 +90,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.mockito mockito-core + + org.mockito + mockito-inline + org.mockito mockito-junit-jupiter diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 6adca817ed1d..c190dc3f4517 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -83,6 +83,7 @@ message ScmContainerLocationRequest { optional DecommissionScmRequestProto decommissionScmRequest = 44; optional SingleNodeQueryRequestProto singleNodeQueryRequest = 45; optional GetContainersOnDecomNodeRequestProto getContainersOnDecomNodeRequest = 46; + optional GetMetricsRequestProto getMetricsRequest = 47; } message ScmContainerLocationResponse { @@ -137,6 +138,7 @@ message ScmContainerLocationResponse { optional DecommissionScmResponseProto decommissionScmResponse = 44; optional SingleNodeQueryResponseProto singleNodeQueryResponse = 45; optional GetContainersOnDecomNodeResponseProto getContainersOnDecomNodeResponse = 46; + optional GetMetricsResponseProto getMetricsResponse = 47; enum Status { OK = 1; @@ -190,6 +192,7 @@ enum Type { DecommissionScm = 40; SingleNodeQuery = 41; GetContainersOnDecomNode = 42; + GetMetrics = 43; } /** @@ -359,6 +362,7 @@ message DatanodeUsageInfoResponseProto { */ message DecommissionNodesRequestProto { repeated string hosts = 1; + optional bool force = 2; } @@ -388,6 +392,7 @@ message RecommissionNodesResponseProto { message StartMaintenanceNodesRequestProto { repeated string hosts = 1; optional int64 endInHours = 2; + optional bool force = 3; } message 
StartMaintenanceNodesResponseProto { @@ -574,6 +579,12 @@ message StartContainerBalancerRequestProto { optional int64 maxSizeLeavingSourceInGB = 7; optional int32 maxDatanodesPercentageToInvolvePerIteration = 8; optional int32 iterations = 9; + optional int32 balancingInterval = 10; + optional int32 moveTimeout = 11; + optional int32 moveReplicationTimeout = 12; + optional bool networkTopologyEnable = 13; + optional string includeNodes = 14; + optional string excludeNodes = 15; } message StartContainerBalancerResponseProto { @@ -618,6 +629,14 @@ message GetContainersOnDecomNodeResponseProto { repeated ContainersOnDecomNodeProto containersOnDecomNode = 1; } +message GetMetricsRequestProto { + optional string query = 1; +} + +message GetMetricsResponseProto { + optional string metricsJson = 1; +} + /** * Protocol used from an HDFS node to StorageContainerManager. See the request * and response messages for details of the RPC calls. diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index 76fdfad111a9..2160f7c5edbf 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -51,11 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> javax.annotation javax.annotation-api - - com.google.code.findbugs - jsr305 - compile - diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto index 0206a8ea71d4..6cfae24d41e7 100644 --- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto +++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto @@ -107,6 +107,7 @@ enum Type { StreamWrite = 20; FinalizeBlock = 21; + Echo = 22; } @@ -215,6 +216,7 @@ message ContainerCommandRequestProto { optional uint32 version = 24; optional FinalizeBlockRequestProto finalizeBlock = 25; + optional EchoRequestProto echo = 26; } message ContainerCommandResponseProto { @@ -247,6 +249,7 @@ message ContainerCommandResponseProto { optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 21; optional FinalizeBlockResponseProto finalizeBlock = 22; + optional EchoResponseProto echo = 23; } message ContainerDataProto { @@ -390,6 +393,17 @@ message ListBlockResponseProto { repeated BlockData blockData = 1; } +message EchoRequestProto { + optional bytes payload = 1; + optional int32 payloadSizeResp = 2; + optional int32 sleepTimeMs = 3; + optional bool readOnly = 4; +} + +message EchoResponseProto { + optional bytes payload = 1; +} + // Chunk Operations message ChunkInfo { @@ -423,9 +437,11 @@ message WriteChunkRequestProto { required DatanodeBlockID blockID = 1; optional ChunkInfo chunkData = 2; optional bytes data = 3; + optional PutBlockRequestProto block = 4; } message WriteChunkResponseProto { + optional GetCommittedBlockLengthResponseProto committedBlockLength = 1; } enum ReadChunkVersion { diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 3f346300b3ed..4555d1cf4a39 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -45,8 +45,10 @@ message DatanodeDetailsProto { optional string networkLocation = 7; // Network topology location optional NodeOperationalState persistedOpState = 8; // The Operational state persisted in the datanode.id file optional int64 persistedOpStateExpiry = 9; // The seconds after the epoch when the OpState should expire + 
optional int32 currentVersion = 10; // Current datanode wire version // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode. + optional uint32 level = 101; } /** @@ -497,3 +499,26 @@ message CompactionLogEntryProto { repeated CompactionFileInfoProto outputFileIntoList = 4; optional string compactionReason = 5; } + +message NodeTopology { + optional string name = 1; + optional string location = 2; + optional uint32 cost = 3; + optional uint32 level = 4; +} + +message NetworkNode { + optional DatanodeDetailsProto datanodeDetails = 1; + optional InnerNode innerNode = 3; +} + +message ChildrenMap { + optional string networkName = 1; + optional NetworkNode networkNode = 2; +} + +message InnerNode { + optional NodeTopology nodeTopology = 1; + optional uint32 numOfLeaves = 2; + repeated ChildrenMap childrenMap = 3; +} diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto index 307c23a56202..3d281975f2b4 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto @@ -30,7 +30,6 @@ package hadoop.hdds.block; import "hdds.proto"; - // SCM Block protocol enum Type { @@ -39,6 +38,7 @@ enum Type { GetScmInfo = 13; SortDatanodes = 14; AddScm = 15; + GetClusterTree = 16; } message SCMBlockLocationRequest { @@ -56,6 +56,7 @@ message SCMBlockLocationRequest { optional hadoop.hdds.GetScmInfoRequestProto getScmInfoRequest = 13; optional SortDatanodesRequestProto sortDatanodesRequest = 14; optional hadoop.hdds.AddScmRequestProto addScmRequestProto = 15; + optional GetClusterTreeRequestProto getClusterTreeRequest = 16; } message SCMBlockLocationResponse { @@ -80,6 +81,7 @@ message SCMBlockLocationResponse { optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse = 13; optional SortDatanodesResponseProto sortDatanodesResponse = 14; optional hadoop.hdds.AddScmResponseProto addScmResponse = 15; + optional GetClusterTreeResponseProto getClusterTreeResponse = 16; } /** @@ -230,6 +232,13 @@ message SortDatanodesResponseProto{ repeated DatanodeDetailsProto node = 1; } +message GetClusterTreeRequestProto { +} + +message GetClusterTreeResponseProto { + required InnerNode clusterTree = 1; +} + /** * Protocol used from OzoneManager to StorageContainerManager. * See request and response messages for details of the RPC calls. diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java new file mode 100644 index 000000000000..0d79a1c833d0 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
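For orientation, a sketch of how the new cluster-tree messages in hdds.proto compose (illustrative values only; assumes the generated classes live under the usual HddsProtos outer class):

  HddsProtos.InnerNode rack = HddsProtos.InnerNode.newBuilder()
      .setNodeTopology(HddsProtos.NodeTopology.newBuilder()
          .setName("rack1").setLocation("/").setCost(1).setLevel(2))
      .setNumOfLeaves(2)
      .build();
  HddsProtos.InnerNode clusterTree = HddsProtos.InnerNode.newBuilder()
      .setNodeTopology(HddsProtos.NodeTopology.newBuilder()
          .setName("").setLocation("/").setCost(1).setLevel(1))
      .setNumOfLeaves(2)
      .addChildrenMap(HddsProtos.ChildrenMap.newBuilder()
          .setNetworkName("/rack1")
          .setNetworkNode(HddsProtos.NetworkNode.newBuilder().setInnerNode(rack)))
      .build();
  // GetClusterTreeResponseProto carries one such InnerNode as its clusterTree field;
  // the client-side translator above rebuilds the in-memory tree from it via
  // InnerNodeImpl.fromProtobuf and then links parents with setParent.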

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + + +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; + +/** + * Class to write the rocksdb lib name to a file. + * This would be used to build native ozone_rocksdb_tools library. + */ +public final class JniLibNamePropertyWriter { + + private JniLibNamePropertyWriter() { + } + + public static void main(String[] args) { + String filePath = args[0]; + try (Writer writer = new OutputStreamWriter( + Files.newOutputStream(Paths.get(filePath)), StandardCharsets.UTF_8)) { + String libName = ManagedRocksObjectUtils.getRocksDBLibFileName(); + writer.write("rocksdbLibName=" + libName); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java index 638739ff557e..4eb2a0d2bc36 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java @@ -18,20 +18,34 @@ */ package org.apache.hadoop.hdds.utils.db.managed; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.DBOptions; +import org.rocksdb.Logger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.LOG; import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; /** * Managed DBOptions. */ public class ManagedDBOptions extends DBOptions { + private final UncheckedAutoCloseable leakTracker = track(this); + private final AtomicReference loggerRef = new AtomicReference<>(); + + @Override + public DBOptions setLogger(Logger logger) { + IOUtils.close(LOG, loggerRef.getAndSet(logger)); + return super.setLogger(logger); + } @Override public void close() { try { + IOUtils.close(LOG, loggerRef.getAndSet(null)); super.close(); } finally { leakTracker.close(); diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java new file mode 100644 index 000000000000..d04f91cd4e29 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
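For orientation, how the JniLibNamePropertyWriter above is meant to be driven (illustrative; the rocks-native pom later in this diff invokes it through exec-maven-plugin and reads the file back with properties-maven-plugin, and the output path here is hypothetical):

  // Writes a single rocksdbLibName=<file name> property; the value is platform
  // dependent, e.g. librocksdbjni-linux64.so on Linux x86_64.
  JniLibNamePropertyWriter.main(new String[] {"target/propertyFile.txt"});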

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.ratis.util.UncheckedAutoCloseable; +import org.rocksdb.InfoLogLevel; +import org.rocksdb.Logger; + +import java.util.function.BiConsumer; + +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; + +/** Managed {@link Logger}. */ +public class ManagedLogger extends Logger { + + private final UncheckedAutoCloseable leakTracker = track(this); + private final BiConsumer delegate; + + public ManagedLogger(ManagedDBOptions dbOptions, BiConsumer delegate) { + super(dbOptions); + this.delegate = delegate; + } + + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + delegate.accept(infoLogLevel, logMsg); + } + + @Override + public void close() { + try { + super.close(); + } finally { + leakTracker.close(); + } + } +} diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java index 9c86a47d7401..148abee7fc0e 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.LeakDetector; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.RocksDB; +import org.rocksdb.util.Environment; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -94,4 +95,11 @@ public static void waitForFileDelete(File file, Duration maxDuration) public static void loadRocksDBLibrary() { RocksDB.loadLibrary(); } + + /** + * Returns RocksDB library file name. + */ + public static String getRocksDBLibFileName() { + return Environment.getJniLibraryFileName("rocksdb"); + } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReader.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReader.java index b49c6e7a9e49..38d09e601d26 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReader.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReader.java @@ -18,18 +18,29 @@ */ package org.apache.hadoop.hdds.utils.db.managed; +import org.apache.ratis.util.UncheckedAutoCloseable; +import org.rocksdb.Options; import org.rocksdb.SstFileReader; +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; + /** * Managed SstFileReader. 
*/ -public class ManagedSstFileReader extends ManagedObject { +public class ManagedSstFileReader extends SstFileReader { + + private final UncheckedAutoCloseable leakTracker = track(this); - ManagedSstFileReader(SstFileReader original) { - super(original); + public ManagedSstFileReader(final Options options) { + super(options); } - public static ManagedSstFileReader managed(SstFileReader reader) { - return new ManagedSstFileReader(reader); + @Override + public void close() { + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index a308158c404c..567f432b3881 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -234,45 +234,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - org.apache.rat - apache-rat-plugin - - - **/*.json - **/hs_err*.log - **/.attach_* - **/**.rej - **/.factorypath - public - **/*.iml - **/target/** - **/output.xml - **/log.html - **/report.html - .gitattributes - .idea/** - src/main/resources/webapps/static/angular-1.8.0.min.js - src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js - src/main/resources/webapps/static/angular-route-1.8.0.min.js - src/main/resources/webapps/static/d3-3.5.17.min.js - src/main/resources/webapps/static/nvd3-1.8.5.min.css.map - src/main/resources/webapps/static/nvd3-1.8.5.min.css - src/main/resources/webapps/static/nvd3-1.8.5.min.js.map - src/main/resources/webapps/static/nvd3-1.8.5.min.js - src/main/resources/webapps/static/jquery-3.5.1.min.js - src/main/resources/webapps/static/bootstrap-3.4.1/** - src/test/resources/additionalfields.container - src/test/resources/incorrect.checksum.container - src/test/resources/incorrect.container - src/test/resources/test.db.ini - src/test/resources/123-dn-container.db/** - src/test/resources/123.container - src/main/resources/proto.lock - - - org.apache.maven.plugins maven-jar-plugin diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index c12ddbb091bd..60775a88e6d2 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -45,18 +45,11 @@ hdds-test-utils test - - org.mockito - mockito-inline - test - 8 8 - https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz - https://zlib.net/fossils/zlib-${zlib.version}.tar.gz @@ -113,79 +106,80 @@ - com.googlecode.maven-download-plugin - download-maven-plugin + org.codehaus.mojo + exec-maven-plugin - rocksdb source download - generate-sources + set-property + initialize - wget - - - https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz - rocksdb-v${rocksdb.version}.tar.gz - ${project.build.directory}/rocksdb - - - - zlib source download - generate-sources - - wget - - - ${zlib.url} - zlib-${zlib.version}.tar.gz - ${project.build.directory}/zlib - - - - bzip2 source download - generate-sources - - wget + java - ${bzip2.url} - bzip2-v${bzip2.version}.tar.gz - ${project.build.directory}/bzip2 + org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter + + ${project.build.directory}/propertyFile.txt + + + + + org.codehaus.mojo + properties-maven-plugin + - lz4 source download - generate-sources + read-property-from-file + initialize - wget + read-project-properties - https://github.com/lz4/lz4/archive/refs/tags/v${lz4.version}.tar.gz - lz4-v${lz4.version}.tar.gz - ${project.build.directory}/lz4 + + ${project.build.directory}/propertyFile.txt + + + + + org.apache.maven.plugins + maven-dependency-plugin + - snappy source download - generate-sources + unpack-dependency + 
initialize - wget + unpack - https://github.com/google/snappy/archive/refs/tags/${snappy.version}.tar.gz - snappy-v${snappy.version}.tar.gz - ${project.build.directory}/snappy + + + org.rocksdb + rocksdbjni + jar + false + ${project.build.directory}/rocksdbjni + + + + + + com.googlecode.maven-download-plugin + download-maven-plugin + - zstd source download + rocksdb source download generate-sources wget - https://github.com/facebook/zstd/archive/refs/tags/v${zstd.version}.tar.gz - zstd-v${zstd.version}.tar.gz - ${project.build.directory}/zstd + https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz + rocksdb-v${rocksdb.version}.tar.gz + ${project.build.directory}/rocksdb @@ -219,89 +213,6 @@ - - - - - - - - - - run - - - - build-zlib - process-sources - - - - - - - - - - - - run - - - - build-bzip2 - process-sources - - - - - - - - - run - - - - build-lz4 - process-sources - - - - - - - - - run - - - - build-zstd - process-sources - - - - - - - - - run - - - - build-snappy - process-sources - - - - - - - - - @@ -319,11 +230,11 @@ + - - + - + @@ -337,6 +248,8 @@ + @@ -346,14 +259,12 @@ - - - - - - + + + @@ -423,8 +334,8 @@ ${env.JAVA_HOME}/bin/javah - org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool - org.apache.hadoop.hdds.utils.db.managed.PipeInputStream + org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader + org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator ${project.build.directory}/native/javah @@ -479,8 +390,8 @@ ${project.build.outputDirectory}:${project.build.directory}/dependency/* -h ${project.build.directory}/native/javah - ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java - ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java + ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java + ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java diff --git a/hadoop-hdds/rocks-native/src/CMakeLists.txt b/hadoop-hdds/rocks-native/src/CMakeLists.txt index 051660777493..eb4fb8d46fd7 100644 --- a/hadoop-hdds/rocks-native/src/CMakeLists.txt +++ b/hadoop-hdds/rocks-native/src/CMakeLists.txt @@ -21,6 +21,7 @@ # cmake_minimum_required(VERSION 2.8) +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") project(ozone_native) @@ -29,50 +30,33 @@ find_package(JNI REQUIRED) include_directories(${JNI_INCLUDE_DIRS}) set(CMAKE_CXX_STANDARD ${CMAKE_STANDARDS}) -set(linked_libraries "") +set(CMAKE_SKIP_BUILD_RPATH FALSE) + +set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) + +set(CMAKE_INSTALL_RPATH "") + +set(CMAKE_INSTALL_RPATH_USE_LINK_PATH FALSE) + if(NOT GENERATED_JAVAH) message(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH") endif() include_directories(${GENERATED_JAVAH}) if(${SST_DUMP_INCLUDE}) include_directories(${ROCKSDB_HEADERS}) - set(SOURCE_FILES ${NATIVE_DIR}/SSTDumpTool.cpp ${NATIVE_DIR}/PipeInputStream.cpp ${NATIVE_DIR}/Pipe.h ${NATIVE_DIR}/Pipe.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) - ADD_LIBRARY(rocksdb STATIC IMPORTED) - set_target_properties( - rocksdb - PROPERTIES - IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb.a) + set(SOURCE_FILES ${NATIVE_DIR}/ManagedRawSSTFileReader.cpp ${NATIVE_DIR}/ManagedRawSSTFileIterator.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) ADD_LIBRARY(rocks_tools STATIC IMPORTED) set_target_properties( rocks_tools PROPERTIES - 
IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb_tools.a) - ADD_LIBRARY(bz2 STATIC IMPORTED) - set_target_properties( - bz2 - PROPERTIES - IMPORTED_LOCATION ${BZIP2_LIB}/libbz2.a) - ADD_LIBRARY(zlib STATIC IMPORTED) - set_target_properties( - zlib - PROPERTIES - IMPORTED_LOCATION ${ZLIB_LIB}/libz.a) - ADD_LIBRARY(lz4 STATIC IMPORTED) - set_target_properties( - lz4 - PROPERTIES - IMPORTED_LOCATION ${LZ4_LIB}/liblz4.a) - ADD_LIBRARY(snappy STATIC IMPORTED) - set_target_properties( - snappy - PROPERTIES - IMPORTED_LOCATION ${SNAPPY_LIB}/libsnappy.a) - ADD_LIBRARY(zstd STATIC IMPORTED) - set_target_properties( - zstd - PROPERTIES - IMPORTED_LOCATION ${ZSTD_LIB}/libzstd.a) - set(linked_libraries ${linked_libraries} bz2 zlib rocks_tools rocksdb lz4 snappy zstd) + IMPORTED_LOCATION ${ROCKSDB_TOOLS_LIB}/librocksdb_tools.a) endif() + add_library(ozone_rocksdb_tools SHARED ${SOURCE_FILES}) -target_link_libraries(ozone_rocksdb_tools ${linked_libraries}) + + +target_link_libraries(ozone_rocksdb_tools PRIVATE ${ROCKSDB_LIB}) +target_link_libraries(ozone_rocksdb_tools PRIVATE rocks_tools) +set_target_properties(ozone_rocksdb_tools PROPERTIES + BUILD_WITH_INSTALL_RPATH FALSE + LINK_FLAGS "-Wl,-rpath -Wl,'$ORIGIN'") diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java index d3121144d37a..8937f0803a18 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java @@ -26,6 +26,5 @@ public final class NativeConstants { private NativeConstants() { } - public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME - = "ozone_rocksdb_tools"; + public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME = "ozone_rocksdb_tools"; } diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java index 10df236f88d4..ce424c930e1c 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.utils; import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.util.ShutdownHookManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,8 @@ import java.io.InputStream; import java.nio.file.Files; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -101,7 +104,7 @@ public static boolean isLibraryLoaded(final String libraryName) { .getOrDefault(libraryName, false); } - public synchronized boolean loadLibrary(final String libraryName) { + public synchronized boolean loadLibrary(final String libraryName, final List dependentFiles) { if (isLibraryLoaded(libraryName)) { return true; } @@ -116,9 +119,9 @@ public synchronized boolean loadLibrary(final String libraryName) { } if (!loaded) { - Optional file = copyResourceFromJarToTemp(libraryName); - if (file.isPresent()) { - System.load(file.get().getAbsolutePath()); + Pair, List> files = copyResourceFromJarToTemp(libraryName, dependentFiles); + if (files.getKey().isPresent()) { + 
System.load(files.getKey().get().getAbsolutePath()); loaded = true; } } @@ -137,19 +140,20 @@ static String getSystemProperty(String property) { // Added function to make this testable @VisibleForTesting - static InputStream getResourceStream(String libraryFileName) { + static InputStream getResourceStream(String libraryFileName) throws IOException { return NativeLibraryLoader.class.getClassLoader() .getResourceAsStream(libraryFileName); } - private Optional copyResourceFromJarToTemp(final String libraryName) + private Pair, List> copyResourceFromJarToTemp(final String libraryName, + final List dependentFileNames) throws IOException { final String libraryFileName = getJniLibraryFileName(libraryName); InputStream is = null; try { is = getResourceStream(libraryFileName); if (is == null) { - return Optional.empty(); + return Pair.of(Optional.empty(), null); } final String nativeLibDir = @@ -160,15 +164,28 @@ private Optional copyResourceFromJarToTemp(final String libraryName) // create a temporary file to copy the library to final File temp = File.createTempFile(libraryName, getLibOsSuffix(), dir); if (!temp.exists()) { - return Optional.empty(); + return Pair.of(Optional.empty(), null); } else { temp.deleteOnExit(); } Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING); + List dependentFiles = new ArrayList<>(); + for (String fileName : dependentFileNames) { + if (is != null) { + is.close(); + } + is = getResourceStream(fileName); + File file = new File(dir, fileName); + Files.copy(is, file.toPath(), StandardCopyOption.REPLACE_EXISTING); + if (file.exists()) { + file.deleteOnExit(); + } + dependentFiles.add(file); + } ShutdownHookManager.get().addShutdownHook(temp::delete, LIBRARY_SHUTDOWN_HOOK_PRIORITY); - return Optional.of(temp); + return Pair.of(Optional.of(temp), dependentFiles); } finally { if (is != null) { is.close(); diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java new file mode 100644 index 000000000000..02125951c1fe --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
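Illustrative aside, not part of this change: the reworked loader above now extracts the named dependent resources (for example the platform-specific rocksdbjni library) into the same temporary directory before System.load()ing the requested library. A minimal sketch of the new two-argument call, assuming the constants and helpers shown elsewhere in this diff; the element type of the dependent-file list (String) is elided in this rendering of the patch:

// Sketch only: load ozone_rocksdb_tools after staging the RocksDB JNI library beside it.
import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;

import java.util.Collections;
import org.apache.hadoop.hdds.utils.NativeLibraryLoader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;

public final class NativeLoaderSketch {
  public static boolean loadRocksTools() {
    return NativeLibraryLoader.getInstance().loadLibrary(
        ROCKS_TOOLS_NATIVE_LIBRARY_NAME,
        Collections.singletonList(ManagedRocksObjectUtils.getRocksDBLibFileName()));
  }
}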

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import com.google.common.primitives.UnsignedLong; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.util.ClosableIterator; + +import java.util.Arrays; +import java.util.NoSuchElementException; +import java.util.function.Function; + +/** + * Iterator for SSTFileReader which would read all entries including tombstones. + */ +public class ManagedRawSSTFileIterator implements ClosableIterator { + // Native address of pointer to the object. + private final long nativeHandle; + private final Function transformer; + + ManagedRawSSTFileIterator(long nativeHandle, Function transformer) { + this.nativeHandle = nativeHandle; + this.transformer = transformer; + } + + private native boolean hasNext(long handle); + private native void next(long handle); + private native byte[] getKey(long handle); + private native byte[] getValue(long handle); + private native long getSequenceNumber(long handle); + private native int getType(long handle); + + @Override + public boolean hasNext() { + return this.hasNext(nativeHandle); + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + + KeyValue keyValue = new KeyValue(this.getKey(nativeHandle), + UnsignedLong.fromLongBits(this.getSequenceNumber(this.nativeHandle)), + this.getType(nativeHandle), + this.getValue(nativeHandle)); + this.next(nativeHandle); + return this.transformer.apply(keyValue); + } + + private native void closeInternal(long handle); + + @Override + public void close() { + this.closeInternal(this.nativeHandle); + } + + /** + * Class containing Parsed KeyValue Record from RawSstReader output. + */ + public static final class KeyValue { + + private final byte[] key; + private final UnsignedLong sequence; + private final Integer type; + private final byte[] value; + + private KeyValue(byte[] key, UnsignedLong sequence, Integer type, + byte[] value) { + this.key = key; + this.sequence = sequence; + this.type = type; + this.value = value; + } + + public byte[] getKey() { + return Arrays.copyOf(key, key.length); + } + + public UnsignedLong getSequence() { + return sequence; + } + + public Integer getType() { + return type; + } + + public byte[] getValue() { + return Arrays.copyOf(value, value.length); + } + + @Override + public String toString() { + return "KeyValue{" + + "key=" + StringUtils.bytes2String(key) + + ", sequence=" + sequence + + ", type=" + type + + ", value=" + StringUtils.bytes2String(value) + + '}'; + } + } +} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java new file mode 100644 index 000000000000..2a58dfce4c4c --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.hadoop.hdds.utils.NativeLibraryLoader; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.util.Arrays; +import java.util.function.Function; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; + +/** + * JNI for RocksDB RawSSTFileReader. + */ +public class ManagedRawSSTFileReader implements Closeable { + + public static boolean loadLibrary() throws NativeLibraryNotLoadedException { + ManagedRocksObjectUtils.loadRocksDBLibrary(); + if (!NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME, Arrays.asList( + ManagedRocksObjectUtils.getRocksDBLibFileName()))) { + throw new NativeLibraryNotLoadedException(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + } + return true; + } + + private final String fileName; + // Native address of pointer to the object. + private final long nativeHandle; + private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class); + + public ManagedRawSSTFileReader(final ManagedOptions options, final String fileName, final int readAheadSize) { + this.fileName = fileName; + this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize); + } + + public ManagedRawSSTFileIterator newIterator( + Function transformerFunction, + ManagedSlice fromSlice, ManagedSlice toSlice) { + long fromNativeHandle = fromSlice == null ? 0 : fromSlice.getNativeHandle(); + long toNativeHandle = toSlice == null ? 0 : toSlice.getNativeHandle(); + LOG.info("Iterating SST file: {} with native lib. " + + "LowerBound: {}, UpperBound: {}", fileName, fromSlice, toSlice); + return new ManagedRawSSTFileIterator<>( + newIterator(this.nativeHandle, fromSlice != null, + fromNativeHandle, toSlice != null, toNativeHandle), + transformerFunction); + } + + private native long newRawSSTFileReader(long optionsHandle, String filePath, int readSize); + + + private native long newIterator(long handle, boolean hasFrom, long fromSliceHandle, boolean hasTo, + long toSliceHandle); + + private native void disposeInternal(long handle); + + @Override + public void close() { + disposeInternal(nativeHandle); + } +} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java deleted file mode 100644 index d8844eaacbcd..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java +++ /dev/null @@ -1,314 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
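Illustrative aside, not part of this change: the ManagedRawSSTFileReader/ManagedRawSSTFileIterator pair introduced above replaces the pipe-based SSTDumpTool classes removed below. A minimal usage sketch assuming only the classes in this diff; raw types are used deliberately because generic signatures are elided in this rendering of the patch, and the 2 MB read-ahead size is illustrative:

// Sketch only: scan every entry (including tombstones) of a single SST file.
import java.util.function.Function;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;

public final class RawSstScanSketch {
  @SuppressWarnings({"rawtypes", "unchecked"})
  public static void printEntries(String sstFile) throws NativeLibraryNotLoadedException {
    // Loads librocksdbjni plus the ozone_rocksdb_tools JNI library built by this module.
    ManagedRawSSTFileReader.loadLibrary();
    Function<ManagedRawSSTFileIterator.KeyValue, String> toText =
        kv -> StringUtils.bytes2String(kv.getKey())
            + " seq=" + kv.getSequence() + " type=" + kv.getType();
    try (ManagedOptions options = new ManagedOptions();
         ManagedRawSSTFileReader reader =
             new ManagedRawSSTFileReader(options, sstFile, 2 * 1024 * 1024)) {
      // Null bounds iterate the whole file; the transformer maps each raw record to a string.
      ManagedRawSSTFileIterator it = reader.newIterator(toText, null, null);
      try {
        while (it.hasNext()) {
          System.out.println(it.next());
        }
      } finally {
        it.close();
      }
    }
  }
}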

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import com.google.common.collect.Maps; -import com.google.common.primitives.UnsignedLong; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.util.ClosableIterator; -import org.eclipse.jetty.io.RuntimeIOException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Arrays; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -/** - * Iterator to Parse output of RocksDBSSTDumpTool. - */ -public abstract class ManagedSSTDumpIterator implements ClosableIterator { - - private static final Logger LOG = - LoggerFactory.getLogger(ManagedSSTDumpIterator.class); - // Since we don't have any restriction on the key & value, we are prepending - // the length of the pattern in the sst dump tool output. - // The first token in the pattern is the key. - // The second tells the sequence number of the key. - // The third token gives the type of key in the sst file. - // The fourth token - private InputStream processOutput; - private Optional currentKey; - private byte[] intBuffer; - private Optional nextKey; - - private ManagedSSTDumpTool.SSTDumpToolTask sstDumpToolTask; - private AtomicBoolean open; - private StackTraceElement[] stackTrace; - - public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool, - String sstFilePath, ManagedOptions options) - throws IOException { - this(sstDumpTool, sstFilePath, options, null, null); - } - - public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool, - String sstFilePath, ManagedOptions options, - ManagedSlice lowerKeyBound, - ManagedSlice upperKeyBound) - throws IOException { - File sstFile = new File(sstFilePath); - if (!sstFile.exists()) { - throw new IOException(String.format("File in path : %s doesn't exist", - sstFile.getAbsolutePath())); - } - if (!sstFile.isFile()) { - throw new IOException(String.format("Path given: %s is not a file", - sstFile.getAbsolutePath())); - } - init(sstDumpTool, sstFile, options, lowerKeyBound, upperKeyBound); - this.stackTrace = Thread.currentThread().getStackTrace(); - } - - /** - * Parses next occuring number in the stream. 
- * - * @return Optional of the integer empty if no integer exists - */ - private Optional getNextNumberInStream() throws IOException { - int n = processOutput.read(intBuffer, 0, 4); - if (n == 4) { - return Optional.of(ByteBuffer.wrap(intBuffer).getInt()); - } else if (n >= 0) { - throw new IllegalStateException(String.format("Integer expects " + - "4 bytes to be read from the stream, but read only %d bytes", n)); - } - return Optional.empty(); - } - - private Optional getNextByteArray() throws IOException { - Optional size = getNextNumberInStream(); - if (size.isPresent()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Allocating byte array, size: {}", size.get()); - } - byte[] b = new byte[size.get()]; - int n = processOutput.read(b); - if (n >= 0 && n != size.get()) { - throw new IllegalStateException(String.format("Integer expects " + - "4 bytes to be read from the stream, but read only %d bytes", n)); - } - return Optional.of(b); - } - return Optional.empty(); - } - - private Optional getNextUnsignedLong() throws IOException { - long val = 0; - for (int i = 0; i < 8; i++) { - val = val << 8; - int nextByte = processOutput.read(); - if (nextByte < 0) { - if (i == 0) { - return Optional.empty(); - } - throw new IllegalStateException(String.format("Long expects " + - "8 bytes to be read from the stream, but read only %d bytes", i)); - } - val += nextByte; - } - return Optional.of(UnsignedLong.fromLongBits(val)); - } - - private void init(ManagedSSTDumpTool sstDumpTool, File sstFile, - ManagedOptions options, ManagedSlice lowerKeyBound, - ManagedSlice upperKeyBound) { - Map argMap = Maps.newHashMap(); - argMap.put("file", sstFile.getAbsolutePath()); - argMap.put("silent", null); - argMap.put("command", "scan"); - // strings containing '\0' do not have the same value when encode UTF-8 on - // java which is 0. But in jni the utf-8 encoded value for '\0' - // becomes -64 -128. Thus the value becomes different. - // In order to support this, changes have been made on the rocks-tools - // to pass the address of the ManagedSlice and the jni can use the object - // of slice directly from there. - if (Objects.nonNull(lowerKeyBound)) { - argMap.put("from", String.valueOf(lowerKeyBound.getNativeHandle())); - } - if (Objects.nonNull(upperKeyBound)) { - argMap.put("to", String.valueOf(upperKeyBound.getNativeHandle())); - } - this.sstDumpToolTask = sstDumpTool.run(argMap, options); - processOutput = sstDumpToolTask.getPipedOutput(); - intBuffer = new byte[4]; - open = new AtomicBoolean(true); - currentKey = Optional.empty(); - nextKey = Optional.empty(); - next(); - } - - /** - * Throws Runtime exception in the case iterator is closed or - * the native Dumptool exited with non zero exit value. - */ - private void checkSanityOfProcess() { - if (!this.open.get()) { - throw new RuntimeException("Iterator has been closed"); - } - if (sstDumpToolTask.getFuture().isDone() && - sstDumpToolTask.exitValue() != 0) { - throw new RuntimeException("Process Terminated with non zero " + - String.format("exit value %d", sstDumpToolTask.exitValue())); - } - } - - /** - * Checks the status of the process & sees if there is another record. - * - * @return True if next exists & false otherwise - * Throws Runtime Exception in case of SST File read failure - */ - - @Override - public boolean hasNext() { - checkSanityOfProcess(); - return nextKey.isPresent(); - } - - /** - * Transforms Key to a certain value. 
- * - * @param value - * @return transformed Value - */ - protected abstract T getTransformedValue(Optional value); - - /** - * Returns the next record from SSTDumpTool. - * - * @return next Key - * Throws Runtime Exception incase of failure. - */ - @Override - public T next() { - checkSanityOfProcess(); - currentKey = nextKey; - nextKey = Optional.empty(); - try { - Optional key = getNextByteArray(); - if (!key.isPresent()) { - return getTransformedValue(currentKey); - } - UnsignedLong sequenceNumber = getNextUnsignedLong() - .orElseThrow(() -> new IllegalStateException( - String.format("Error while trying to read sequence number" + - " for key %s", StringUtils.bytes2String(key.get())))); - - Integer type = getNextNumberInStream() - .orElseThrow(() -> new IllegalStateException( - String.format("Error while trying to read sequence number for " + - "key %s with sequence number %s", - StringUtils.bytes2String(key.get()), - sequenceNumber.toString()))); - byte[] val = getNextByteArray().orElseThrow(() -> - new IllegalStateException( - String.format("Error while trying to read sequence number for " + - "key %s with sequence number %s of type %d", - StringUtils.bytes2String(key.get()), - sequenceNumber.toString(), type))); - nextKey = Optional.of(new KeyValue(key.get(), sequenceNumber, type, val)); - } catch (IOException e) { - // TODO [SNAPSHOT] Throw custom snapshot exception - throw new RuntimeIOException(e); - } - return getTransformedValue(currentKey); - } - - @Override - public synchronized void close() throws UncheckedIOException { - if (this.sstDumpToolTask != null) { - if (!this.sstDumpToolTask.getFuture().isDone()) { - this.sstDumpToolTask.getFuture().cancel(true); - } - try { - this.processOutput.close(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - open.compareAndSet(true, false); - } - - @Override - protected void finalize() throws Throwable { - if (open.get()) { - LOG.warn("{} is not closed properly." + - " StackTrace for unclosed instance: {}", - this.getClass().getName(), - Arrays.stream(stackTrace) - .map(StackTraceElement::toString).collect( - Collectors.joining("\n"))); - } - this.close(); - super.finalize(); - } - - /** - * Class containing Parsed KeyValue Record from Sst Dumptool output. 
- */ - public static final class KeyValue { - - private final byte[] key; - private final UnsignedLong sequence; - private final Integer type; - private final byte[] value; - - private KeyValue(byte[] key, UnsignedLong sequence, Integer type, - byte[] value) { - this.key = key; - this.sequence = sequence; - this.type = type; - this.value = value; - } - - public byte[] getKey() { - return key; - } - - public UnsignedLong getSequence() { - return sequence; - } - - public Integer getType() { - return type; - } - - public byte[] getValue() { - return value; - } - - @Override - public String toString() { - return "KeyValue{" + - "key=" + StringUtils.bytes2String(key) + - ", sequence=" + sequence + - ", type=" + type + - ", value=" + StringUtils.bytes2String(value) + - '}'; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java deleted file mode 100644 index 5d965d7398e0..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; - -import java.io.InputStream; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - -/** - * JNI for RocksDB SSTDumpTool. 
Pipes the output to an output stream - */ -public class ManagedSSTDumpTool { - - private int bufferCapacity; - private ExecutorService executorService; - - public ManagedSSTDumpTool(ExecutorService executorService, - int bufferCapacity) - throws NativeLibraryNotLoadedException { - if (!NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) { - throw new NativeLibraryNotLoadedException( - ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - this.bufferCapacity = bufferCapacity; - this.executorService = executorService; - } - - public SSTDumpToolTask run(String[] args, ManagedOptions options) { - PipeInputStream pipeInputStream = new PipeInputStream(bufferCapacity); - return new SSTDumpToolTask(this.executorService.submit(() -> - this.runInternal(args, options.getNativeHandle(), - pipeInputStream.getNativeHandle())), pipeInputStream); - } - - public SSTDumpToolTask run(Map args, ManagedOptions options) { - return this.run(args.entrySet().stream().map(e -> "--" - + (e.getValue() == null || e.getValue().isEmpty() ? e.getKey() : - e.getKey() + "=" + e.getValue())).toArray(String[]::new), options); - } - - private native int runInternal(String[] args, long optionsHandle, - long pipeHandle); - - /** - * Class holding piped output of SST Dumptool & future of command. - */ - static class SSTDumpToolTask { - private Future future; - private InputStream pipedOutput; - - SSTDumpToolTask(Future future, InputStream pipedOutput) { - this.future = future; - this.pipedOutput = pipedOutput; - } - - public Future getFuture() { - return future; - } - - public InputStream getPipedOutput() { - return pipedOutput; - } - - public int exitValue() { - if (this.future.isDone()) { - try { - return future.get(); - } catch (InterruptedException | ExecutionException e) { - return 1; - } - } - return 0; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java deleted file mode 100644 index df4f613f98e2..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import java.io.InputStream; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * JNI for reading data from pipe. 
- */ -public class PipeInputStream extends InputStream { - - private byte[] byteBuffer; - private long nativeHandle; - private int numberOfBytesLeftToRead; - private int index = 0; - private int capacity; - - private AtomicBoolean cleanup; - - PipeInputStream(int capacity) { - this.byteBuffer = new byte[capacity]; - this.numberOfBytesLeftToRead = 0; - this.capacity = capacity; - this.nativeHandle = newPipe(); - this.cleanup = new AtomicBoolean(false); - } - - long getNativeHandle() { - return nativeHandle; - } - - @Override - public int read() { - if (numberOfBytesLeftToRead < 0) { - this.close(); - return -1; - } - while (numberOfBytesLeftToRead == 0) { - numberOfBytesLeftToRead = readInternal(byteBuffer, capacity, - nativeHandle); - index = 0; - if (numberOfBytesLeftToRead != 0) { - return read(); - } - } - numberOfBytesLeftToRead--; - int ret = byteBuffer[index] & 0xFF; - index += 1; - return ret; - } - - private native long newPipe(); - - private native int readInternal(byte[] buff, int numberOfBytes, - long pipeHandle); - - private native void closeInternal(long pipeHandle); - - @Override - public void close() { - if (this.cleanup.compareAndSet(false, true)) { - closeInternal(this.nativeHandle); - } - } - - @Override - protected void finalize() throws Throwable { - close(); - super.finalize(); - } -} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp new file mode 100644 index 000000000000..1cf222528379 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator.h" +#include "rocksdb/options.h" +#include "rocksdb/raw_iterator.h" +#include +#include "cplusplus_to_java_convert.h" +#include + +jboolean Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj, + jlong native_handle) { + return static_cast(reinterpret_cast(native_handle)->Valid()); +} + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj, + jlong native_handle) { + reinterpret_cast(native_handle)->Next(); +} + +jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getKey(JNIEnv *env, + jobject obj, + jlong native_handle) { + ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->key(); + jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(slice.size()), + const_cast(reinterpret_cast(slice.data()))); + return jkey; +} + + +jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getValue(JNIEnv *env, + jobject obj, + jlong native_handle) { + ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->value(); + jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(slice.size()), + const_cast(reinterpret_cast(slice.data()))); + return jkey; +} + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint64_t sequence_number = + reinterpret_cast(native_handle)->sequenceNumber(); + jlong result; + std::memcpy(&result, &sequence_number, sizeof(jlong)); + return result; +} + + +jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getType(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint32_t type = reinterpret_cast(native_handle)->type(); + return static_cast(type); +} + + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env, + jobject obj, + jlong native_handle) { + delete reinterpret_cast(native_handle); +} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp new file mode 100644 index 000000000000..f3b8dc02639d --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader.h" +#include "rocksdb/options.h" +#include "rocksdb/raw_sst_file_reader.h" +#include "rocksdb/raw_iterator.h" +#include +#include "cplusplus_to_java_convert.h" +#include + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj, + jlong options_handle, + jstring jfilename, + jint readahead_size) { + ROCKSDB_NAMESPACE::Options *options = reinterpret_cast(options_handle); + const char *file_path = env->GetStringUTFChars(jfilename, nullptr); + size_t read_ahead_size_value = static_cast(readahead_size); + ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = + new ROCKSDB_NAMESPACE::RawSstFileReader(*options, file_path, read_ahead_size_value, true, true); + env->ReleaseStringUTFChars(jfilename, file_path); + return GET_CPLUSPLUS_POINTER(raw_sst_file_reader); +} + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj, + jlong native_handle, + jboolean jhas_from, + jlong from_slice_handle, + jboolean jhas_to, + jlong to_slice_handle) { + ROCKSDB_NAMESPACE::Slice* from_slice = nullptr; + ROCKSDB_NAMESPACE::Slice* to_slice = nullptr; + ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = + reinterpret_cast(native_handle); + bool has_from = static_cast(jhas_from); + bool has_to = static_cast(jhas_to); + if (has_from) { + from_slice = reinterpret_cast(from_slice_handle); + } + if (has_to) { + to_slice = reinterpret_cast(to_slice_handle); + } + ROCKSDB_NAMESPACE::RawIterator* iterator = raw_sst_file_reader->newIterator(has_from, from_slice, has_to, to_slice); + return GET_CPLUSPLUS_POINTER(iterator); +} + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj, + jlong native_handle) { + delete reinterpret_cast(native_handle); +} diff --git a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp b/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp deleted file mode 100644 index 53f60cdd65af..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include "Pipe.h" -#include "cplusplus_to_java_convert.h" -#include "org_apache_hadoop_hdds_utils_db_managed_PipeInputStream.h" - - -jlong Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_newPipe(JNIEnv *, jobject) { - Pipe *pipe = new Pipe(); - return GET_CPLUSPLUS_POINTER(pipe); -} - -jint Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_readInternal(JNIEnv *env, jobject object, jbyteArray jbyteArray, jint capacity, jlong nativeHandle) { - int cap_int = capacity; - Pipe *pipe = reinterpret_cast(nativeHandle); - jbyte *b = (env)->GetByteArrayElements(jbyteArray, JNI_FALSE); - cap_int = read(pipe->getReadFd(), b, cap_int); - if (cap_int == 0) { - if (!pipe->isOpen()) { - cap_int = -1; - } - } - env->ReleaseByteArrayElements(jbyteArray, b, 0); - return cap_int; -} - -void Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_closeInternal(JNIEnv *env, jobject object, jlong nativeHandle) { - delete reinterpret_cast(nativeHandle); -} - diff --git a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp b/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp deleted file mode 100644 index 285c5906c2d8..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool.h" -#include "rocksdb/options.h" -#include "rocksdb/sst_dump_tool.h" -#include -#include "cplusplus_to_java_convert.h" -#include "Pipe.h" -#include - -jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool_runInternal(JNIEnv *env, jobject obj, - jobjectArray argsArray, jlong optionsHandle, jlong pipeHandle) { - ROCKSDB_NAMESPACE::SSTDumpTool dumpTool; - ROCKSDB_NAMESPACE::Options options; - Pipe *pipe = reinterpret_cast(pipeHandle); - int length = env->GetArrayLength(argsArray); - char *args[length + 1]; - for (int i = 0; i < length; i++) { - jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)i); - char *utf_str = (char *)env->GetStringUTFChars(str_val, JNI_FALSE); - args[i + 1] = utf_str; - } - FILE *wr = fdopen(pipe->getWriteFd(), "w"); - int ret = dumpTool.Run(length + 1, args, options, wr); - for (int i = 1; i < length + 1; i++) { - jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)(i - 1)); - env->ReleaseStringUTFChars(str_val, args[i]); - } - fclose(wr); - pipe->close(); - return ret; -} diff --git a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h index efe9d4a5be24..4862ea12a1b9 100644 --- a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h +++ b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h @@ -16,7 +16,7 @@ * limitations under the License. */ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). diff --git a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch index 841c2533b863..12dc74614a45 100644 --- a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch +++ b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch @@ -16,592 +16,531 @@ * limitations under the License. 
*/ -diff --git a/include/rocksdb/sst_dump_tool.h b/include/rocksdb/sst_dump_tool.h -index 9261ba47d..1e62b88a3 100644 ---- a/include/rocksdb/sst_dump_tool.h -+++ b/include/rocksdb/sst_dump_tool.h -@@ -11,7 +11,8 @@ namespace ROCKSDB_NAMESPACE { - - class SSTDumpTool { - public: -- int Run(int argc, char const* const* argv, Options options = Options()); -+ int Run(int argc, char const* const* argv, Options options = Options(), -+ FILE* out = stdout, FILE* err = stderr); - }; - - } // namespace ROCKSDB_NAMESPACE -diff --git a/table/sst_file_dumper.cc b/table/sst_file_dumper.cc -index eefbaaeee..734a2f0dd 100644 ---- a/table/sst_file_dumper.cc -+++ b/table/sst_file_dumper.cc -@@ -45,7 +45,7 @@ SstFileDumper::SstFileDumper(const Options& options, - Temperature file_temp, size_t readahead_size, - bool verify_checksum, bool output_hex, - bool decode_blob_index, const EnvOptions& soptions, -- bool silent) -+ bool silent, FILE* out, FILE* err) - : file_name_(file_path), - read_num_(0), - file_temp_(file_temp), -@@ -57,10 +57,13 @@ SstFileDumper::SstFileDumper(const Options& options, - ioptions_(options_), - moptions_(ColumnFamilyOptions(options_)), - read_options_(verify_checksum, false), -- internal_comparator_(BytewiseComparator()) { -+ internal_comparator_(BytewiseComparator()), -+ out_(out), -+ err_(err) -+ { - read_options_.readahead_size = readahead_size; - if (!silent_) { -- fprintf(stdout, "Process %s\n", file_path.c_str()); -+ fprintf(out_, "Process %s\n", file_path.c_str()); - } - init_result_ = GetTableReader(file_name_); - } -@@ -253,17 +256,17 @@ Status SstFileDumper::ShowAllCompressionSizes( - int32_t compress_level_from, int32_t compress_level_to, - uint32_t max_dict_bytes, uint32_t zstd_max_train_bytes, - uint64_t max_dict_buffer_bytes, bool use_zstd_dict_trainer) { -- fprintf(stdout, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); -+ fprintf(out_, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); - for (auto& i : compression_types) { - if (CompressionTypeSupported(i.first)) { -- fprintf(stdout, "Compression: %-24s\n", i.second); -+ fprintf(out_, "Compression: %-24s\n", i.second); - CompressionOptions compress_opt; - compress_opt.max_dict_bytes = max_dict_bytes; - compress_opt.zstd_max_train_bytes = zstd_max_train_bytes; - compress_opt.max_dict_buffer_bytes = max_dict_buffer_bytes; - compress_opt.use_zstd_dict_trainer = use_zstd_dict_trainer; - for (int32_t j = compress_level_from; j <= compress_level_to; j++) { -- fprintf(stdout, "Compression level: %d", j); -+ fprintf(out_, "Compression level: %d", j); - compress_opt.level = j; - Status s = ShowCompressionSize(block_size, i.first, compress_opt); - if (!s.ok()) { -@@ -271,7 +274,7 @@ Status SstFileDumper::ShowAllCompressionSizes( - } - } - } else { -- fprintf(stdout, "Unsupported compression type: %s.\n", i.second); -+ fprintf(err_, "Unsupported compression type: %s.\n", i.second); - } - } - return Status::OK(); -@@ -307,9 +310,9 @@ Status SstFileDumper::ShowCompressionSize( - } - - std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); -- fprintf(stdout, " Size: %10" PRIu64, file_size); -- fprintf(stdout, " Blocks: %6" PRIu64, num_data_blocks); -- fprintf(stdout, " Time Taken: %10s microsecs", -+ fprintf(out_, " Size: %10" PRIu64, file_size); -+ fprintf(out_, " Blocks: %6" PRIu64, num_data_blocks); -+ fprintf(out_, " Time Taken: %10s microsecs", - std::to_string( - std::chrono::duration_cast(end - start) - .count()) -@@ -342,11 +345,11 @@ Status SstFileDumper::ShowCompressionSize( - : 
((static_cast(not_compressed_blocks) / - static_cast(num_data_blocks)) * - 100.0); -- fprintf(stdout, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks, -+ fprintf(out_, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks, - compressed_pcnt); -- fprintf(stdout, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)", -+ fprintf(out_, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)", - ratio_not_compressed_blocks, ratio_not_compressed_pcnt); -- fprintf(stdout, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n", -+ fprintf(out_, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n", - not_compressed_blocks, not_compressed_pcnt); - return Status::OK(); - } -@@ -362,7 +365,7 @@ Status SstFileDumper::ReadTableProperties(uint64_t table_magic_number, - /* memory_allocator= */ nullptr, prefetch_buffer); - if (!s.ok()) { - if (!silent_) { -- fprintf(stdout, "Not able to read table properties\n"); -+ fprintf(err_, "Not able to read table properties\n"); - } - } - return s; -@@ -410,7 +413,7 @@ Status SstFileDumper::SetTableOptionsByMagicNumber( - - options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); - if (!silent_) { -- fprintf(stdout, "Sst file format: plain table\n"); -+ fprintf(out_, "Sst file format: plain table\n"); - } - } else { - char error_msg_buffer[80]; -@@ -427,15 +430,56 @@ Status SstFileDumper::SetOldTableOptions() { - assert(table_properties_ == nullptr); - options_.table_factory = std::make_shared(); - if (!silent_) { -- fprintf(stdout, "Sst file format: block-based(old version)\n"); -+ fprintf(out_, "Sst file format: block-based(old version)\n"); - } - - return Status::OK(); - } - -+void write(int value, FILE* file) { -+ char b[4]; -+ b[3] = value & 0x000000ff; -+ b[2] = (value & 0x0000ff00) >> 8; -+ b[1] = (value & 0x00ff0000) >> 16; -+ b[0] = (value & 0xff000000) >> 24; -+ std::fwrite(b, 4, 1, file); +diff --git a/include/rocksdb/raw_iterator.h b/include/rocksdb/raw_iterator.h +new file mode 100644 +index 000000000..21242ed15 +--- /dev/null ++++ b/include/rocksdb/raw_iterator.h +@@ -0,0 +1,25 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++#pragma once ++#ifndef ROCKSDB_LITE ++ ++ ++#include "rocksdb/advanced_options.h" ++namespace ROCKSDB_NAMESPACE { ++ ++class RawIterator { ++ public: ++ virtual ~RawIterator() {} ++ virtual bool Valid() const = 0; ++ virtual Slice key() const = 0; ++ virtual Slice value() const = 0; ++ virtual uint64_t sequenceNumber() const = 0; ++ virtual uint32_t type() const = 0; ++ virtual void Next() = 0; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/include/rocksdb/raw_sst_file_reader.h b/include/rocksdb/raw_sst_file_reader.h +new file mode 100644 +index 000000000..09e748208 +--- /dev/null ++++ b/include/rocksdb/raw_sst_file_reader.h +@@ -0,0 +1,62 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). 
++#pragma once ++#ifndef ROCKSDB_LITE ++ ++#include ++#include ++ ++#include "rocksdb/raw_iterator.h" ++#include "rocksdb/advanced_options.h" ++#include "rocksdb/options.h" ++ ++ ++ ++namespace ROCKSDB_NAMESPACE { ++ ++class RawSstFileReader { ++ public: ++ ++ RawSstFileReader(const Options& options, const std::string& file_name, ++ size_t readahead_size, bool verify_checksum, ++ bool silent = false); ++ ~RawSstFileReader(); ++ ++ RawIterator* newIterator(bool has_from, Slice* from, ++ bool has_to, Slice *to); ++ Status getStatus() { return init_result_; } ++ ++ private: ++ // Get the TableReader implementation for the sst file ++ Status GetTableReader(const std::string& file_path); ++ Status ReadTableProperties(uint64_t table_magic_number, ++ uint64_t file_size); ++ ++ Status SetTableOptionsByMagicNumber(uint64_t table_magic_number); ++ Status SetOldTableOptions(); ++ ++ // Helper function to call the factory with settings specific to the ++ // factory implementation ++ Status NewTableReader(uint64_t file_size); ++ ++ std::string file_name_; ++ Temperature file_temp_; ++ ++ // less verbose in stdout/stderr ++ bool silent_; ++ ++ // options_ and internal_comparator_ will also be used in ++ // ReadSequential internally (specifically, seek-related operations) ++ Options options_; ++ ++ Status init_result_; ++ ++ struct Rep; ++ std::unique_ptr rep_; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/src.mk b/src.mk +index b94bc43ca..c13e5cde6 100644 +--- a/src.mk ++++ b/src.mk +@@ -338,11 +338,8 @@ RANGE_TREE_SOURCES =\ + utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc + + TOOL_LIB_SOURCES = \ +- tools/io_tracer_parser_tool.cc \ +- tools/ldb_cmd.cc \ +- tools/ldb_tool.cc \ +- tools/sst_dump_tool.cc \ +- utilities/blob_db/blob_dump_tool.cc \ ++ tools/raw_sst_file_reader.cc \ ++ tools/raw_sst_file_iterator.cc \ + + ANALYZER_LIB_SOURCES = \ + tools/block_cache_analyzer/block_cache_trace_analyzer.cc \ +diff --git a/tools/raw_sst_file_iterator.cc b/tools/raw_sst_file_iterator.cc +new file mode 100644 +index 000000000..3051637a3 +--- /dev/null ++++ b/tools/raw_sst_file_iterator.cc +@@ -0,0 +1,76 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). 
++// ++#ifndef ROCKSDB_LITE ++ ++ ++#include ++#include ++ ++#include "db/memtable.h" ++#include "db/write_batch_internal.h" ++#include "rocksdb/status.h" ++#include "rocksdb/utilities/ldb_cmd.h" ++#include "table/block_based/block.h" ++#include "table/block_based/block_based_table_builder.h" ++#include "table/block_based/block_based_table_factory.h" ++#include "table/meta_blocks.h" ++#include "table/plain/plain_table_factory.h" ++#include "tools/raw_sst_file_iterator.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++RawSstFileIterator::RawSstFileIterator(InternalIterator* iterator, ++ bool has_from, Slice* from_key, ++ bool has_to, Slice* to_key) ++ : iter_(iterator), ++ ikey(new ParsedInternalKey()), ++ has_to_(has_to), ++ to_key_(to_key) { ++ if (has_from) { ++ InternalKey k; ++ k.SetMinPossibleForUserKey(*from_key); ++ iter_->Seek(k.Encode()); ++ } else { ++ iter_->SeekToFirst(); ++ } ++ initKey(); ++} ++ ++bool RawSstFileIterator::Valid() const { ++ return iter_->Valid() && (!has_to_ || ++ BytewiseComparator()->Compare( ++ key(), *to_key_) < 0); ++} ++ ++void RawSstFileIterator::initKey() { ++ if (iter_->Valid()) { ++ ParseInternalKey(iter_->key(), ikey, true /* log_err_key */); ++ } +} ++void RawSstFileIterator::Next() { ++ iter_->Next(); ++ initKey(); ++ ++} ++ ++Slice RawSstFileIterator::key() const { ++ return ikey->user_key; ++} ++ ++uint64_t RawSstFileIterator::sequenceNumber() const { ++ return ikey->sequence; ++} ++ ++uint32_t RawSstFileIterator::type() const { ++ return static_cast(ikey->type); ++} ++ ++Slice RawSstFileIterator::value() const { ++ return iter_->value(); ++} ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/tools/raw_sst_file_iterator.h b/tools/raw_sst_file_iterator.h +new file mode 100644 +index 000000000..58e34b260 +--- /dev/null ++++ b/tools/raw_sst_file_iterator.h +@@ -0,0 +1,45 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++#pragma once ++#ifndef ROCKSDB_LITE + -+void write(const char* value, int length, FILE* file) { -+ write(length, file); -+ fwrite(value, length, 1, file); ++#include ++#include ++#include "file/writable_file_writer.h" ++#include "rocksdb/advanced_options.h" ++#include "rocksdb/raw_iterator.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++class RawSstFileIterator : public RawIterator { ++ public: ++ explicit RawSstFileIterator(InternalIterator* iterator, ++ bool has_from, ++ Slice* from_key, ++ bool has_to, ++ Slice* to_key); ++ ++ bool Valid() const override; ++ Slice key() const override; ++ Slice value() const override; ++ uint64_t sequenceNumber() const override; ++ uint32_t type() const override; ++ void Next() final override; ++ ++ ~RawSstFileIterator(){ ++ delete iter_; ++ } ++ ++ private: ++ void initKey(); ++ InternalIterator* iter_; ++ ParsedInternalKey* ikey; ++ bool has_to_; ++ Slice* to_key_; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/tools/raw_sst_file_reader.cc b/tools/raw_sst_file_reader.cc +new file mode 100644 +index 000000000..5ba8a82ee +--- /dev/null ++++ b/tools/raw_sst_file_reader.cc +@@ -0,0 +1,272 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++// ++#ifndef ROCKSDB_LITE ++ ++#include "rocksdb/raw_sst_file_reader.h" ++ ++#include ++#include ++#include ++#include ++ ++ ++#include "db/memtable.h" ++#include "db/write_batch_internal.h" ++#include "options/cf_options.h" ++#include "rocksdb/env.h" ++#include "rocksdb/slice_transform.h" ++#include "rocksdb/status.h" ++#include "rocksdb/table_properties.h" ++#include "rocksdb/utilities/ldb_cmd.h" ++#include "table/block_based/block.h" ++#include "table/block_based/block_based_table_builder.h" ++#include "table/block_based/block_based_table_factory.h" ++#include "table/format.h" ++#include "table/meta_blocks.h" ++#include "table/plain/plain_table_factory.h" ++#include "table/table_reader.h" ++#include "tools/raw_sst_file_iterator.h" ++#include "db/dbformat.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++struct RawSstFileReader::Rep { ++ Options options; ++ EnvOptions soptions_; ++ ReadOptions read_options_; ++ ImmutableOptions ioptions_; ++ MutableCFOptions moptions_; ++ InternalKeyComparator internal_comparator_; ++ std::unique_ptr table_properties_; ++ std::unique_ptr table_reader_; ++ std::unique_ptr file_; ++ ++ Rep(const Options& opts, bool verify_checksum, size_t readahead_size) ++ : options(opts), ++ soptions_(EnvOptions()), ++ read_options_(verify_checksum, false), ++ ioptions_(options), ++ moptions_(ColumnFamilyOptions(options)), ++ internal_comparator_(InternalKeyComparator(BytewiseComparator())) { ++ read_options_.readahead_size = readahead_size; ++ } ++}; ++ ++RawSstFileReader::RawSstFileReader(const Options& options, ++ const std::string& file_name, ++ size_t readahead_size, ++ bool verify_checksum, ++ bool silent) :rep_(new Rep(options, ++ verify_checksum, ++ readahead_size)) { ++ file_name_ = file_name; ++ silent_ = silent; ++ options_ = options; ++ file_temp_ = Temperature::kUnknown; ++ init_result_ = GetTableReader(file_name_); +} + -+void write(const std::string& value, FILE* file) { -+ write(value.data(), (int)value.length(), file); ++RawSstFileReader::~RawSstFileReader() {} ++ ++ ++ ++extern const uint64_t kBlockBasedTableMagicNumber; ++extern const uint64_t kLegacyBlockBasedTableMagicNumber; ++extern const uint64_t kPlainTableMagicNumber; ++extern const uint64_t kLegacyPlainTableMagicNumber; ++ ++Status RawSstFileReader::GetTableReader(const std::string& file_path) { ++ // Warning about 'magic_number' being uninitialized shows up only in UBsan ++ // builds. Though access is guarded by 's.ok()' checks, fix the issue to ++ // avoid any warnings. 
++ uint64_t magic_number = Footer::kNullTableMagicNumber; ++ ++ // read table magic number ++ Footer footer; ++ ++ const auto& fs = options_.env->GetFileSystem(); ++ std::unique_ptr file; ++ uint64_t file_size = 0; ++ FileOptions fopts = rep_->soptions_; ++ fopts.temperature = file_temp_; ++ Status s = fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); ++ if (s.ok()) { ++ s = fs->GetFileSize(file_path, IOOptions(), &file_size, nullptr); ++ } ++ ++ // check empty file ++ // if true, skip further processing of this file ++ if (file_size == 0) { ++ return Status::Aborted(file_path, "Empty file"); ++ } ++ ++ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); ++ ++ FilePrefetchBuffer prefetch_buffer( ++ 0 /* readahead_size */, 0 /* max_readahead_size */, true /* enable */, ++ false /* track_min_offset */); ++ if (s.ok()) { ++ const uint64_t kSstDumpTailPrefetchSize = 512 * 1024; ++ uint64_t prefetch_size = (file_size > kSstDumpTailPrefetchSize) ++ ? kSstDumpTailPrefetchSize ++ : file_size; ++ uint64_t prefetch_off = file_size - prefetch_size; ++ IOOptions opts; ++ s = prefetch_buffer.Prefetch(opts, rep_->file_.get(), prefetch_off, ++ static_cast(prefetch_size), ++ Env::IO_TOTAL /* rate_limiter_priority */); ++ ++ s = ReadFooterFromFile(opts, rep_->file_.get(), &prefetch_buffer, file_size, ++ &footer); ++ } ++ if (s.ok()) { ++ magic_number = footer.table_magic_number(); ++ } ++ ++ if (s.ok()) { ++ if (magic_number == kPlainTableMagicNumber || ++ magic_number == kLegacyPlainTableMagicNumber) { ++ rep_->soptions_.use_mmap_reads = true; ++ ++ fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); ++ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); ++ } ++ ++ s = ROCKSDB_NAMESPACE::ReadTableProperties( ++ rep_->file_.get(), file_size, magic_number, rep_->ioptions_, &(rep_->table_properties_), ++ /* memory_allocator= */ nullptr, (magic_number == kBlockBasedTableMagicNumber) ++ ? &prefetch_buffer ++ : nullptr); ++ // For old sst format, ReadTableProperties might fail but file can be read ++ if (s.ok()) { ++ s = SetTableOptionsByMagicNumber(magic_number); ++ if (s.ok()) { ++ if (rep_->table_properties_ && !rep_->table_properties_->comparator_name.empty()) { ++ ConfigOptions config_options; ++ const Comparator* user_comparator = nullptr; ++ s = Comparator::CreateFromString(config_options, ++ rep_->table_properties_->comparator_name, ++ &user_comparator); ++ if (s.ok()) { ++ assert(user_comparator); ++ rep_->internal_comparator_ = InternalKeyComparator(user_comparator); ++ } ++ } ++ } ++ } else { ++ if (!silent_) { ++ fprintf(stderr, "Not able to read table properties\n"); ++ } ++ s = SetOldTableOptions(); ++ } ++ options_.comparator = rep_->internal_comparator_.user_comparator(); ++ } ++ ++ if (s.ok()) { ++ s = NewTableReader(file_size); ++ } ++ return s; +} + -+void write(Slice &slice, FILE* file) { -+ int size = (int)slice.size(); -+ write(slice.data(), size, file); ++Status RawSstFileReader::NewTableReader(uint64_t file_size) { ++ auto t_opt = ++ TableReaderOptions(rep_->ioptions_, rep_->moptions_.prefix_extractor, rep_->soptions_, ++ rep_->internal_comparator_, false /* skip_filters */, ++ false /* imortal */, true /* force_direct_prefetch */); ++ // Allow open file with global sequence number for backward compatibility. 
++ t_opt.largest_seqno = kMaxSequenceNumber; ++ ++ // We need to turn off pre-fetching of index and filter nodes for ++ // BlockBasedTable ++ if (options_.table_factory->IsInstanceOf( ++ TableFactory::kBlockBasedTableName())) { ++ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), ++ file_size, &(rep_->table_reader_), ++ /*enable_prefetch=*/false); ++ } ++ ++ // For all other factory implementation ++ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), ++ file_size, &(rep_->table_reader_)); +} + -+void write(SequenceNumber sequenceNumber, FILE* file) { ++Status RawSstFileReader::SetTableOptionsByMagicNumber( ++ uint64_t table_magic_number) { ++ assert(rep_->table_properties_); ++ if (table_magic_number == kBlockBasedTableMagicNumber || ++ table_magic_number == kLegacyBlockBasedTableMagicNumber) { ++ BlockBasedTableFactory* bbtf = new BlockBasedTableFactory(); ++ // To force tail prefetching, we fake reporting two useful reads of 512KB ++ // from the tail. ++ // It needs at least two data points to warm up the stats. ++ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); ++ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); ++ ++ options_.table_factory.reset(bbtf); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: block-based\n"); ++ } ++ ++ auto& props = rep_->table_properties_->user_collected_properties; ++ auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); ++ if (pos != props.end()) { ++ auto index_type_on_file = static_cast( ++ DecodeFixed32(pos->second.c_str())); ++ if (index_type_on_file == ++ BlockBasedTableOptions::IndexType::kHashSearch) { ++ options_.prefix_extractor.reset(NewNoopTransform()); ++ } ++ } ++ } else if (table_magic_number == kPlainTableMagicNumber || ++ table_magic_number == kLegacyPlainTableMagicNumber) { ++ options_.allow_mmap_reads = true; + -+ char b[8]; -+ int idx = 7; -+ while (idx >= 0) { -+ b[idx] = sequenceNumber % 256; -+ sequenceNumber /= 256; -+ idx -= 1; ++ PlainTableOptions plain_table_options; ++ plain_table_options.user_key_len = kPlainTableVariableLength; ++ plain_table_options.bloom_bits_per_key = 0; ++ plain_table_options.hash_table_ratio = 0; ++ plain_table_options.index_sparseness = 1; ++ plain_table_options.huge_page_tlb_size = 0; ++ plain_table_options.encoding_type = kPlain; ++ plain_table_options.full_scan_mode = true; ++ ++ options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: plain table\n"); ++ } ++ } else { ++ char error_msg_buffer[80]; ++ snprintf(error_msg_buffer, sizeof(error_msg_buffer) - 1, ++ "Unsupported table magic number --- %lx", ++ (long)table_magic_number); ++ return Status::InvalidArgument(error_msg_buffer); + } -+ fwrite(b, 8, 1, file); ++ ++ return Status::OK(); +} + -+void write(ParsedInternalKey &key, FILE* file) { -+ write(key.user_key, file); -+ write(key.sequence, file); -+ write(static_cast(key.type), file); ++Status RawSstFileReader::SetOldTableOptions() { ++ assert(rep_->table_properties_ == nullptr); ++ options_.table_factory = std::make_shared(); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: block-based(old version)\n"); ++ } ++ ++ return Status::OK(); +} + - Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, -- bool has_from, const std::string& from_key, -- bool has_to, const std::string& to_key, -+ bool has_from, const Slice& from_key, -+ bool has_to, const Slice& to_key, - bool use_from_as_prefix) { - if 
(!table_reader_) { - return init_result_; -@@ -446,6 +490,7 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - /*arena=*/nullptr, /*skip_filters=*/false, - TableReaderCaller::kSSTDumpTool); - uint64_t i = 0; -+ - if (has_from) { - InternalKey ikey; - ikey.SetMinPossibleForUserKey(from_key); -@@ -453,6 +498,7 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - } else { - iter->SeekToFirst(); - } -+ - for (; iter->Valid(); iter->Next()) { - Slice key = iter->key(); - Slice value = iter->value(); -@@ -478,22 +524,19 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - - if (print_kv) { - if (!decode_blob_index_ || ikey.type != kTypeBlobIndex) { -- fprintf(stdout, "%s => %s\n", -- ikey.DebugString(true, output_hex_).c_str(), -- value.ToString(output_hex_).c_str()); -+ write(ikey, out_); -+ write(value, out_); - } else { - BlobIndex blob_index; -- - const Status s = blob_index.DecodeFrom(value); - if (!s.ok()) { -- fprintf(stderr, "%s => error decoding blob index\n", -- ikey.DebugString(true, output_hex_).c_str()); -+ write(ikey, err_); -+ write("error decoding blob index", err_); - continue; - } -- -- fprintf(stdout, "%s => %s\n", -- ikey.DebugString(true, output_hex_).c_str(), -- blob_index.DebugString(output_hex_).c_str()); -+ write(ikey, out_); -+ std::string v = blob_index.DebugString(output_hex_); -+ write(v, out_); - } - } - } -diff --git a/table/sst_file_dumper.h b/table/sst_file_dumper.h -index 7be876390..768c5b1e2 100644 ---- a/table/sst_file_dumper.h -+++ b/table/sst_file_dumper.h -@@ -22,11 +22,13 @@ class SstFileDumper { - bool verify_checksum, bool output_hex, - bool decode_blob_index, - const EnvOptions& soptions = EnvOptions(), -- bool silent = false); -+ bool silent = false, -+ FILE* out = stdout, -+ FILE* err = stderr); - - Status ReadSequential(bool print_kv, uint64_t read_num, bool has_from, -- const std::string& from_key, bool has_to, -- const std::string& to_key, -+ const Slice& from_key, bool has_to, -+ const Slice& to_key, - bool use_from_as_prefix = false); - - Status ReadTableProperties( -@@ -94,6 +96,8 @@ class SstFileDumper { - ReadOptions read_options_; - InternalKeyComparator internal_comparator_; - std::unique_ptr table_properties_; -+ FILE* out_; -+ FILE* err_; - }; - - } // namespace ROCKSDB_NAMESPACE -diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc -index 7053366e7..8f248ddf3 100644 ---- a/tools/sst_dump_tool.cc -+++ b/tools/sst_dump_tool.cc -@@ -31,7 +31,7 @@ static const std::vector> - - namespace { - --void print_help(bool to_stderr) { -+void print_help(bool to_stderr, FILE* err_, FILE* out_) { - std::string supported_compressions; - for (CompressionType ct : GetSupportedCompressions()) { - if (!supported_compressions.empty()) { -@@ -43,7 +43,7 @@ void print_help(bool to_stderr) { - supported_compressions += str; - } - fprintf( -- to_stderr ? stderr : stdout, -+ to_stderr ? 
err_ : out_, - R"(sst_dump --file= [--command=check|scan|raw|recompress|identify] - --file= - Path to SST file or directory containing SST files -@@ -149,7 +149,13 @@ bool ParseIntArg(const char* arg, const std::string arg_name, - } - } // namespace - --int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { -+Slice* AssignSlicePrependedWithLength(const char* buf) { -+ long val = std::stol(buf); -+ return reinterpret_cast(val); ++RawIterator* RawSstFileReader::newIterator( ++ bool has_from, Slice* from, bool has_to, Slice* to) { ++ InternalIterator* iter = rep_->table_reader_->NewIterator( ++ rep_->read_options_, rep_->moptions_.prefix_extractor.get(), ++ /*arena=*/nullptr, /*skip_filters=*/false, ++ TableReaderCaller::kSSTDumpTool); ++ return new RawSstFileIterator(iter, has_from, from, has_to, to); ++ +} ++} // namespace ROCKSDB_NAMESPACE + -+int SSTDumpTool::Run(int argc, char const* const* argv, Options options, -+ FILE* out, FILE* err) { - std::string env_uri, fs_uri; - const char* dir_or_file = nullptr; - uint64_t read_num = std::numeric_limits::max(); -@@ -170,8 +176,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - bool has_compression_level_from = false; - bool has_compression_level_to = false; - bool has_specified_compression_types = false; -- std::string from_key; -- std::string to_key; -+ bool silent = false; -+ Slice* from_key = nullptr; -+ Slice* to_key = nullptr; - std::string block_size_str; - std::string compression_level_from_str; - std::string compression_level_to_str; -@@ -197,7 +204,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - int64_t tmp_val; - - for (int i = 1; i < argc; i++) { -- if (strncmp(argv[i], "--env_uri=", 10) == 0) { -+ if (strncmp(argv[i], "--silent", 8) == 0) { -+ silent = true; -+ } else if (strncmp(argv[i], "--env_uri=", 10) == 0) { - env_uri = argv[i] + 10; - } else if (strncmp(argv[i], "--fs_uri=", 9) == 0) { - fs_uri = argv[i] + 9; -@@ -217,13 +226,13 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - } else if (strncmp(argv[i], "--command=", 10) == 0) { - command = argv[i] + 10; - } else if (strncmp(argv[i], "--from=", 7) == 0) { -- from_key = argv[i] + 7; -+ from_key = AssignSlicePrependedWithLength(argv[i] + 7); - has_from = true; - } else if (strncmp(argv[i], "--to=", 5) == 0) { -- to_key = argv[i] + 5; -+ to_key = AssignSlicePrependedWithLength(argv[i] + 5); - has_to = true; - } else if (strncmp(argv[i], "--prefix=", 9) == 0) { -- from_key = argv[i] + 9; -+ from_key = AssignSlicePrependedWithLength( argv[i] + 9); - use_from_as_prefix = true; - } else if (strcmp(argv[i], "--show_properties") == 0) { - show_properties = true; -@@ -273,7 +282,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - std::cerr << pik_status.getState() << "\n"; - retc = -1; - } -- fprintf(stdout, "key=%s\n", ikey.DebugString(true, true).c_str()); -+ fprintf(out, "key=%s\n", ikey.DebugString(true, true).c_str()); - return retc; - } else if (ParseIntArg(argv[i], "--compression_level_from=", - "compression_level_from must be numeric", -@@ -288,9 +297,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_max_dict_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0 || tmp_val > std::numeric_limits::max()) { -- fprintf(stderr, "compression_max_dict_bytes must be a uint32_t: '%s'\n", -+ fprintf(err, "compression_max_dict_bytes must be a uint32_t: '%s'\n", - argv[i]); -- 
print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_max_dict_bytes = static_cast(tmp_val); -@@ -298,10 +307,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_zstd_max_train_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0 || tmp_val > std::numeric_limits::max()) { -- fprintf(stderr, -+ fprintf(err, - "compression_zstd_max_train_bytes must be a uint32_t: '%s'\n", - argv[i]); -- print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_zstd_max_train_bytes = static_cast(tmp_val); -@@ -309,56 +318,56 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_max_dict_buffer_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0) { -- fprintf(stderr, -+ fprintf(err, - "compression_max_dict_buffer_bytes must be positive: '%s'\n", - argv[i]); -- print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_max_dict_buffer_bytes = static_cast(tmp_val); - } else if (strcmp(argv[i], "--compression_use_zstd_finalize_dict") == 0) { - compression_use_zstd_finalize_dict = true; - } else if (strcmp(argv[i], "--help") == 0) { -- print_help(/*to_stderr*/ false); -+ print_help(/*to_stderr*/ false, err, out); - return 0; - } else if (strcmp(argv[i], "--version") == 0) { - printf("%s\n", GetRocksBuildInfoAsString("sst_dump").c_str()); - return 0; - } else { -- fprintf(stderr, "Unrecognized argument '%s'\n\n", argv[i]); -- print_help(/*to_stderr*/ true); -+ fprintf(err, "Unrecognized argument '%s'\n\n", argv[i]); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - } - - if(has_compression_level_from && has_compression_level_to) { - if(!has_specified_compression_types || compression_types.size() != 1) { -- fprintf(stderr, "Specify one compression type.\n\n"); -+ fprintf(err, "Specify one compression type.\n\n"); - exit(1); - } - } else if(has_compression_level_from || has_compression_level_to) { -- fprintf(stderr, "Specify both --compression_level_from and " -+ fprintf(err, "Specify both --compression_level_from and " - "--compression_level_to.\n\n"); - exit(1); - } - - if (use_from_as_prefix && has_from) { -- fprintf(stderr, "Cannot specify --prefix and --from\n\n"); -+ fprintf(err, "Cannot specify --prefix and --from\n\n"); - exit(1); - } - - if (input_key_hex) { - if (has_from || use_from_as_prefix) { -- from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key); -+ *from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key -> ToString()); - } - if (has_to) { -- to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key); -+ *to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key->ToString()); - } - } - - if (dir_or_file == nullptr) { -- fprintf(stderr, "file or directory must be specified.\n\n"); -- print_help(/*to_stderr*/ true); -+ fprintf(err, "file or directory must be specified.\n\n"); -+ print_help(/*to_stderr*/ true, err, out); - exit(1); - } - -@@ -373,10 +382,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - Status s = Env::CreateFromUri(config_options, env_uri, fs_uri, &options.env, - &env_guard); - if (!s.ok()) { -- fprintf(stderr, "CreateEnvFromUri: %s\n", s.ToString().c_str()); -+ fprintf(err, "CreateEnvFromUri: %s\n", s.ToString().c_str()); - exit(1); -- } else { -- fprintf(stdout, "options.env is %p\n", options.env); -+ } else if (!silent){ -+ fprintf(out, "options.env is %p\n", options.env); - } - } 
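The hunks above remove the old sst_dump_tool plumbing from the embedded RocksDB patch: key bounds passed as stringified Slice pointers via --from/--to, plus FILE* out/err redirection and a --silent flag. The replacement consumed by the Java changes later in this patch is the native-backed ManagedRawSSTFileReader. What follows is a minimal usage sketch, not part of the patch itself; the generic signatures are assumed from how TestManagedRawSSTFileIterator further down uses the classes, and the class name and input path are illustrative only.

import java.util.function.Function;

import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;

public final class RawSstScanSketch {
  private RawSstScanSketch() { }

  public static void main(String[] args) throws Exception {
    // Load the rocks-tools JNI library that backs the raw reader.
    ManagedRawSSTFileReader.loadLibrary();
    String sstFile = args[0];  // illustrative input path
    try (ManagedOptions options = new ManagedOptions();
         // 2 MB readahead, mirroring TestManagedRawSSTFileIterator; the generic
         // parameters are an assumption (the diff text elides angle brackets).
         ManagedRawSSTFileReader<ManagedRawSSTFileIterator.KeyValue> reader =
             new ManagedRawSSTFileReader<>(options, sstFile, 2 * 1024 * 1024);
         // null bounds: scan the whole file, tombstones included.
         ManagedRawSSTFileIterator<ManagedRawSSTFileIterator.KeyValue> it =
             reader.newIterator(Function.identity(), null, null)) {
      while (it.hasNext()) {
        ManagedRawSSTFileIterator.KeyValue kv = it.next();
        // getType() exposes the raw entry type (put vs. delete), which the plain
        // SstFileReader iterator used elsewhere in SstFileSetReader cannot report.
        System.out.println(kv.getType() + " " + StringUtils.bytes2String(kv.getKey()));
      }
    }
  }
}

With null bounds the iterator walks the whole file; passing ManagedSlice bounds built with StringUtils.string2Bytes, as SstFileSetReader does below, restricts the scan to a key range.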
- -@@ -390,7 +399,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - Status s = env->FileExists(dir_or_file); - // dir_or_file does not exist - if (!s.ok()) { -- fprintf(stderr, "%s%s: No such file or directory\n", s.ToString().c_str(), -+ fprintf(err, "%s%s: No such file or directory\n", s.ToString().c_str(), - dir_or_file); - return 1; - } -@@ -421,10 +430,11 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - - ROCKSDB_NAMESPACE::SstFileDumper dumper( - options, filename, Temperature::kUnknown, readahead_size, -- verify_checksum, output_hex, decode_blob_index); -+ verify_checksum, output_hex, decode_blob_index, EnvOptions(), -+ silent, out, err); - // Not a valid SST - if (!dumper.getStatus().ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), -+ fprintf(err, "%s: %s\n", filename.c_str(), - dumper.getStatus().ToString().c_str()); - continue; - } else { -@@ -433,10 +443,11 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - // where there is at least one valid SST - if (valid_sst_files.size() == 1) { - // from_key and to_key are only used for "check", "scan", or "" -- if (command == "check" || command == "scan" || command == "") { -- fprintf(stdout, "from [%s] to [%s]\n", -- ROCKSDB_NAMESPACE::Slice(from_key).ToString(true).c_str(), -- ROCKSDB_NAMESPACE::Slice(to_key).ToString(true).c_str()); -+ if (!silent && (command == "check" || command == "scan" || -+ command == "")) { -+ fprintf(out, "from [%s] to [%s]\n", -+ from_key->ToString(true).c_str(), -+ to_key->ToString(true).c_str()); - } - } - } -@@ -449,7 +460,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - compression_zstd_max_train_bytes, compression_max_dict_buffer_bytes, - !compression_use_zstd_finalize_dict); - if (!st.ok()) { -- fprintf(stderr, "Failed to recompress: %s\n", st.ToString().c_str()); -+ fprintf(err, "Failed to recompress: %s\n", st.ToString().c_str()); - exit(1); - } - return 0; -@@ -461,10 +472,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - - st = dumper.DumpTable(out_filename); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -+ fprintf(err, "%s: %s\n", filename.c_str(), st.ToString().c_str()); - exit(1); - } else { -- fprintf(stdout, "raw dump written to file %s\n", &out_filename[0]); -+ fprintf(out, "raw dump written to file %s\n", &out_filename[0]); - } - continue; - } -@@ -473,10 +484,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - if (command == "" || command == "scan" || command == "check") { - st = dumper.ReadSequential( - command == "scan", read_num > 0 ? 
(read_num - total_read) : read_num, -- has_from || use_from_as_prefix, from_key, has_to, to_key, -+ has_from || use_from_as_prefix, *from_key, has_to, *to_key, - use_from_as_prefix); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), -+ fprintf(err, "%s: %s\n", filename.c_str(), - st.ToString().c_str()); - } - total_read += dumper.GetReadNumber(); -@@ -488,10 +499,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - if (command == "verify") { - st = dumper.VerifyChecksum(); - if (!st.ok()) { -- fprintf(stderr, "%s is corrupted: %s\n", filename.c_str(), -+ fprintf(err, "%s is corrupted: %s\n", filename.c_str(), - st.ToString().c_str()); - } else { -- fprintf(stdout, "The file is ok\n"); -+ fprintf(out, "The file is ok\n"); - } - continue; - } -@@ -503,15 +514,15 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - table_properties_from_reader; - st = dumper.ReadTableProperties(&table_properties_from_reader); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -- fprintf(stderr, "Try to use initial table properties\n"); -+ fprintf(err, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -+ fprintf(err, "Try to use initial table properties\n"); - table_properties = dumper.GetInitTableProperties(); - } else { - table_properties = table_properties_from_reader.get(); - } - if (table_properties != nullptr) { - if (show_properties) { -- fprintf(stdout, -+ fprintf(out, - "Table Properties:\n" - "------------------------------\n" - " %s", -@@ -523,18 +534,18 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - total_index_block_size += table_properties->index_size; - total_filter_block_size += table_properties->filter_size; - if (show_properties) { -- fprintf(stdout, -+ fprintf(out, - "Raw user collected properties\n" - "------------------------------\n"); - for (const auto& kv : table_properties->user_collected_properties) { - std::string prop_name = kv.first; - std::string prop_val = Slice(kv.second).ToString(true); -- fprintf(stdout, " # %s: 0x%s\n", prop_name.c_str(), -+ fprintf(out, " # %s: 0x%s\n", prop_name.c_str(), - prop_val.c_str()); - } - } - } else { -- fprintf(stderr, "Reader unexpectedly returned null properties\n"); -+ fprintf(err, "Reader unexpectedly returned null properties\n"); - } - } - } -@@ -555,9 +566,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - // Exit with an error state - if (dir) { - fprintf(stdout, "------------------------------\n"); -- fprintf(stderr, "No valid SST files found in %s\n", dir_or_file); -+ fprintf(err, "No valid SST files found in %s\n", dir_or_file); - } else { -- fprintf(stderr, "%s is not a valid SST file\n", dir_or_file); -+ fprintf(err, "%s is not a valid SST file\n", dir_or_file); - } - return 1; - } else { ++#endif // ROCKSDB_LITE diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index 8fc4e83e7a1d..f0074e0a1ac9 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.utils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.ozone.test.tag.Native; import 
org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; @@ -27,6 +28,7 @@ import java.io.ByteArrayInputStream; import java.io.File; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -56,37 +58,27 @@ private static Stream nativeLibraryDirectoryLocations() { @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @ParameterizedTest @MethodSource("nativeLibraryDirectoryLocations") - public void testNativeLibraryLoader( - String nativeLibraryDirectoryLocation) { + public void testNativeLibraryLoader(String nativeLibraryDirectoryLocation) throws NativeLibraryNotLoadedException { Map libraryLoadedMap = new HashMap<>(); NativeLibraryLoader loader = new NativeLibraryLoader(libraryLoadedMap); - try (MockedStatic mockedNativeLibraryLoader = - mockStatic(NativeLibraryLoader.class, - CALLS_REAL_METHODS)) { - mockedNativeLibraryLoader.when(() -> - NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) + try (MockedStatic mockedNativeLibraryLoader = mockStatic(NativeLibraryLoader.class, + CALLS_REAL_METHODS)) { + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) .thenReturn(nativeLibraryDirectoryLocation); - mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()) - .thenReturn(loader); - assertTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - assertTrue(NativeLibraryLoader - .isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()).thenReturn(loader); + ManagedRawSSTFileReader.loadLibrary(); + assertTrue(NativeLibraryLoader.isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); // Mocking to force copy random bytes to create a lib file to // nativeLibraryDirectoryLocation. But load library will fail. - mockedNativeLibraryLoader.when(() -> - NativeLibraryLoader.getResourceStream(anyString())) + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getResourceStream(anyString())) .thenReturn(new ByteArrayInputStream(new byte[]{0, 1, 2, 3})); String dummyLibraryName = "dummy_lib"; - NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName); + NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName, Collections.emptyList()); NativeLibraryLoader.isLibraryLoaded(dummyLibraryName); // Checking if the resource with random was copied to a temp file. - File[] libPath = - new File(nativeLibraryDirectoryLocation == null ? "" : - nativeLibraryDirectoryLocation) - .getAbsoluteFile().listFiles((dir, name) -> - name.startsWith(dummyLibraryName) && - name.endsWith(NativeLibraryLoader.getLibOsSuffix())); + File[] libPath = new File(nativeLibraryDirectoryLocation == null ? 
"" : nativeLibraryDirectoryLocation) + .getAbsoluteFile().listFiles((dir, name) -> name.startsWith(dummyLibraryName) && + name.endsWith(NativeLibraryLoader.getLibOsSuffix())); assertNotNull(libPath); assertEquals(1, libPath.length); assertTrue(libPath[0].delete()); diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java new file mode 100644 index 000000000000..00816e60d7f2 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.apache.hadoop.hdds.utils.TestUtils; +import org.apache.ozone.test.tag.Native; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Named; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test for ManagedRawSSTFileReaderIterator. + */ +@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) +class TestManagedRawSSTFileIterator { + + @TempDir + private Path tempDir; + + private File createSSTFileWithKeys( + TreeMap, String> keys) throws Exception { + File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile(); + try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); + ManagedOptions managedOptions = new ManagedOptions(); + ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(envOptions, managedOptions)) { + sstFileWriter.open(file.getAbsolutePath()); + for (Map.Entry, String> entry : keys.entrySet()) { + if (entry.getKey().getValue() == 0) { + sstFileWriter.delete(entry.getKey().getKey().getBytes(StandardCharsets.UTF_8)); + } else { + sstFileWriter.put(entry.getKey().getKey().getBytes(StandardCharsets.UTF_8), + entry.getValue().getBytes(StandardCharsets.UTF_8)); + } + } + sstFileWriter.finish(); + } + return file; + } + + private static Stream keyValueFormatArgs() { + return Stream.of(Arguments.of(Named.of("Key starting with a single quote", "'key%1$d=>"), + Named.of("Value starting with a number ending with a single quote", "%1$dvalue'")), + Arguments.of(Named.of("Key ending with a number", "key%1$d"), + Named.of("Value starting & ending with a number", "%1$dvalue%1$d")), + Arguments.of(Named.of("Key starting with a single quote & ending with a number", "'key%1$d"), + Named.of("Value starting & ending with a number & elosed within quotes", "%1$d'value%1$d'")), + Arguments.of(Named.of("Key starting with a single quote & ending with a number", "'key%1$d"), + Named.of("Value starting & ending with a number & elosed within quotes", "%1$d'value%1$d'")), + Arguments.of(Named.of("Key ending with a number", "key%1$d"), + Named.of("Value starting & ending with a number & containing null character & new line character", + "%1$dvalue\n\0%1$d")), + Arguments.of(Named.of("Key ending with a number & containing a null character", "key\0%1$d"), + Named.of("Value starting & ending with a number 
& elosed within quotes", "%1$dvalue\r%1$d"))); + } + + @BeforeAll + public static void init() throws NativeLibraryNotLoadedException { + ManagedRawSSTFileReader.loadLibrary(); + } + + + @ParameterizedTest + @MethodSource("keyValueFormatArgs") + public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueFormat) throws Exception { + TreeMap, String> keys = IntStream.range(0, 100).boxed().collect(Collectors.toMap( + i -> Pair.of(String.format(keyFormat, i), i % 2), + i -> i % 2 == 0 ? "" : String.format(valueFormat, i), + (v1, v2) -> v2, + TreeMap::new)); + File file = createSSTFileWithKeys(keys); + try (ManagedOptions options = new ManagedOptions(); + ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader<>( + options, file.getAbsolutePath(), 2 * 1024 * 1024)) { + List> testBounds = TestUtils.getTestingBounds(keys.keySet().stream() + .collect(Collectors.toMap(Pair::getKey, Pair::getValue, (v1, v2) -> v1, TreeMap::new))); + for (Optional keyStart : testBounds) { + for (Optional keyEnd : testBounds) { + Map, String> expectedKeys = keys.entrySet().stream() + .filter(e -> keyStart.map(s -> e.getKey().getKey().compareTo(s) >= 0).orElse(true)) + .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0).orElse(true)) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + Optional lowerBound = keyStart.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); + Optional upperBound = keyEnd.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); + try (ManagedRawSSTFileIterator iterator + = reader.newIterator(Function.identity(), lowerBound.orElse(null), upperBound.orElse(null))) { + while (iterator.hasNext()) { + ManagedRawSSTFileIterator.KeyValue r = iterator.next(); + String key = StringUtils.bytes2String(r.getKey()); + Pair recordKey = Pair.of(key, r.getType()); + assertThat(expectedKeys).containsKey(recordKey); + assertEquals(Optional.ofNullable(expectedKeys.get(recordKey)).orElse(""), + StringUtils.bytes2String(r.getValue())); + expectedKeys.remove(recordKey); + } + assertEquals(0, expectedKeys.size()); + } finally { + lowerBound.ifPresent(ManagedSlice::close); + upperBound.ifPresent(ManagedSlice::close); + } + } + } + } + } +} diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java deleted file mode 100644 index d2796c19fc50..000000000000 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import com.google.common.primitives.Bytes; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.TestUtils; -import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; -import org.junit.jupiter.api.Named; -import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.TreeMap; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assumptions.assumeTrue; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Test for ManagedSSTDumpIterator. 
- */ -class TestManagedSSTDumpIterator { - - @TempDir - private Path tempDir; - - private File createSSTFileWithKeys( - TreeMap, String> keys) throws Exception { - File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile(); - try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); - ManagedOptions managedOptions = new ManagedOptions(); - ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter( - envOptions, managedOptions)) { - sstFileWriter.open(file.getAbsolutePath()); - for (Map.Entry, String> entry : keys.entrySet()) { - if (entry.getKey().getValue() == 0) { - sstFileWriter.delete(entry.getKey().getKey() - .getBytes(StandardCharsets.UTF_8)); - } else { - sstFileWriter.put(entry.getKey().getKey() - .getBytes(StandardCharsets.UTF_8), - entry.getValue().getBytes(StandardCharsets.UTF_8)); - } - } - sstFileWriter.finish(); - } - return file; - } - - private static Stream keyValueFormatArgs() { - return Stream.of( - Arguments.of( - Named.of("Key starting with a single quote", - "'key%1$d=>"), - Named.of("Value starting with a number ending with a" + - " single quote", "%1$dvalue'") - ), - Arguments.of( - Named.of("Key ending with a number", "key%1$d"), - Named.of("Value starting & ending with a number", "%1$dvalue%1$d") - ), - Arguments.of( - Named.of("Key starting with a single quote & ending" + - " with a number", "'key%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$d'value%1$d'")), - Arguments.of( - Named.of("Key starting with a single quote & ending" + - " with a number", "'key%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$d'value%1$d'") - ), - Arguments.of( - Named.of("Key ending with a number", "key%1$d"), - Named.of("Value starting & ending with a number " + - "& containing null character & new line character", - "%1$dvalue\n\0%1$d") - ), - Arguments.of( - Named.of("Key ending with a number & containing" + - " a null character", "key\0%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$dvalue\r%1$d") - ) - ); - } - - private static byte[] getBytes(Integer val) { - ByteBuffer destByteBuffer = ByteBuffer.allocate(4); - destByteBuffer.order(ByteOrder.BIG_ENDIAN); - destByteBuffer.putInt(val); - return destByteBuffer.array(); - } - - private static byte[] getBytes(Long val) { - ByteBuffer destByteBuffer = ByteBuffer.allocate(8); - destByteBuffer.order(ByteOrder.BIG_ENDIAN); - destByteBuffer.putLong(val); - return destByteBuffer.array(); - } - - private static byte[] getBytes(String val) { - byte[] b = new byte[val.length()]; - for (int i = 0; i < val.length(); i++) { - b[i] = (byte) val.charAt(i); - } - return b; - } - - private static Stream invalidPipeInputStreamBytes() { - return Stream.of( - Arguments.of(Named.of("Invalid 3 byte integer", - new byte[]{0, 0, 0})), - Arguments.of(Named.of("Invalid 2 byte integer", - new byte[]{0, 0})), - Arguments.of(Named.of("Invalid 1 byte integer", - new byte[]{0, 0})), - Arguments.of(Named.of("Invalid key name length", - Bytes.concat(getBytes(4), getBytes("key")))), - Arguments.of(Named.of("Invalid Unsigned Long length", - Bytes.concat(getBytes(4), getBytes("key1"), - new byte[]{0, 0}))), - Arguments.of(Named.of("Invalid Sequence number", - Bytes.concat(getBytes(4), getBytes("key1")))), - Arguments.of(Named.of("Invalid Type", - Bytes.concat(getBytes(4), getBytes("key1"), - getBytes(4L)))), - Arguments.of(Named.of("Invalid Value", - Bytes.concat(getBytes(4), getBytes("key"), - getBytes(4L), 
getBytes(0)))), - Arguments.of(Named.of("Invalid Value length", - Bytes.concat(getBytes(4), getBytes("key"), - getBytes(4L), getBytes(1), getBytes(6), - getBytes("val")))) - ); - } - - @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) - @ParameterizedTest - @MethodSource("keyValueFormatArgs") - @Unhealthy("HDDS-9274") - public void testSSTDumpIteratorWithKeyFormat(String keyFormat, - String valueFormat) - throws Exception { - assumeTrue(NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - - TreeMap, String> keys = - IntStream.range(0, 100).boxed().collect( - Collectors.toMap( - i -> Pair.of(String.format(keyFormat, i), i % 2), - i -> i % 2 == 0 ? "" : String.format(valueFormat, i), - (v1, v2) -> v2, - TreeMap::new)); - File file = createSSTFileWithKeys(keys); - ExecutorService executorService = - new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(1), - new ThreadPoolExecutor.CallerRunsPolicy()); - ManagedSSTDumpTool tool = new ManagedSSTDumpTool(executorService, 8192); - List> testBounds = TestUtils.getTestingBounds( - keys.keySet().stream().collect(Collectors.toMap(Pair::getKey, - Pair::getValue, (v1, v2) -> v1, TreeMap::new))); - for (Optional keyStart : testBounds) { - for (Optional keyEnd : testBounds) { - Map, String> expectedKeys = keys.entrySet() - .stream().filter(e -> keyStart.map(s -> e.getKey().getKey() - .compareTo(s) >= 0).orElse(true)) - .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0) - .orElse(true)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - Optional lowerBound = keyStart - .map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - Optional upperBound = keyEnd - .map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - try (ManagedOptions options = new ManagedOptions(); - ManagedSSTDumpIterator iterator = - new ManagedSSTDumpIterator(tool, - file.getAbsolutePath(), options, lowerBound.orElse(null), - upperBound.orElse(null)) { - @Override - protected KeyValue getTransformedValue( - Optional value) { - return value.orElse(null); - } - } - ) { - while (iterator.hasNext()) { - ManagedSSTDumpIterator.KeyValue r = iterator.next(); - String key = new String(r.getKey(), StandardCharsets.UTF_8); - Pair recordKey = Pair.of(key, r.getType()); - assertThat(expectedKeys).containsKey(recordKey); - assertEquals(Optional.ofNullable(expectedKeys - .get(recordKey)).orElse(""), - new String(r.getValue(), StandardCharsets.UTF_8)); - expectedKeys.remove(recordKey); - } - assertEquals(0, expectedKeys.size()); - } finally { - lowerBound.ifPresent(ManagedSlice::close); - upperBound.ifPresent(ManagedSlice::close); - } - } - } - executorService.shutdown(); - } - - - @ParameterizedTest - @MethodSource("invalidPipeInputStreamBytes") - public void testInvalidSSTDumpIteratorWithKeyFormat(byte[] inputBytes) - throws ExecutionException, - InterruptedException, IOException { - ByteArrayInputStream byteArrayInputStream = - new ByteArrayInputStream(inputBytes); - ManagedSSTDumpTool tool = mock(ManagedSSTDumpTool.class); - File file = Files.createFile(tempDir.resolve("tmp_file.sst")).toFile(); - Future future = mock(Future.class); - when(future.isDone()).thenReturn(false); - when(future.get()).thenReturn(0); - when(tool.run(any(Map.class), - any(ManagedOptions.class))) - .thenReturn(new ManagedSSTDumpTool.SSTDumpToolTask(future, - byteArrayInputStream)); - try (ManagedOptions options = new ManagedOptions()) { - assertThrows(IllegalStateException.class, - () -> new ManagedSSTDumpIterator( - tool, 
file.getAbsolutePath(), options) { - @Override - protected KeyValue getTransformedValue( - Optional value) { - return value.orElse(null); - } - }); - } - } -} diff --git a/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties new file mode 100644 index 000000000000..959da047fb7f --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties @@ -0,0 +1,76 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +#
+# http://www.apache.org/licenses/LICENSE-2.0 +#
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +name=PropertiesConfig + +# Checks for config change periodically and reloads +monitorInterval=5 + +filter=read, write +# filter.read.onMatch = DENY avoids logging all READ events +# filter.read.onMatch = ACCEPT permits logging all READ events +# The above two settings ignore the log levels in configuration +# filter.read.onMatch = NEUTRAL permits logging of only those READ events +# which are attempted at log level equal or greater than log level specified +# in the configuration +filter.read.type = MarkerFilter +filter.read.marker = READ +filter.read.onMatch = NEUTRAL +filter.read.onMismatch = NEUTRAL + +# filter.write.onMatch = DENY avoids logging all WRITE events +# filter.write.onMatch = ACCEPT permits logging all WRITE events +# The above two settings ignore the log levels in configuration +# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events +# which are attempted at log level equal or greater than log level specified +# in the configuration +filter.write.type = MarkerFilter +filter.write.marker = WRITE +filter.write.onMatch = NEUTRAL +filter.write.onMismatch = NEUTRAL + +# Log Levels are organized from most specific to least: +# OFF (most specific, no logging) +# FATAL (most specific, little data) +# ERROR +# WARN +# INFO +# DEBUG +# TRACE (least specific, a lot of data) +# ALL (least specific, all data) + +appenders = console, audit +appender.console.type = Console +appender.console.name = STDOUT +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %-5level | %c{1} | %msg%n + +appender.audit.type = File +appender.audit.name = AUDITLOG +appender.audit.fileName=audit.log +appender.audit.layout.type=PatternLayout +appender.audit.layout.pattern= %-5level | %c{1} | %C | %msg%n + +loggers=audit +logger.audit.type=AsyncLogger +logger.audit.name=OMAudit +logger.audit.level = INFO +logger.audit.appenderRefs = audit +logger.audit.appenderRef.file.ref = AUDITLOG + +rootLogger.level = INFO +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT diff --git a/hadoop-hdds/rocks-native/src/test/resources/log4j.properties b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties new file mode 100644 index 000000000000..398786689af3 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
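The auditlog.properties added above wires an async OMAudit logger to a file appender and declares MarkerFilters keyed on the READ and WRITE markers (both NEUTRAL here, so log levels decide what is written). As a point of reference, a small log4j2 sketch of emitting a marked event against that logger; only the logger name and marker come from the config above, the message text is illustrative.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

public final class AuditLogSketch {
  // Matches logger.audit.name=OMAudit in auditlog.properties.
  private static final Logger AUDIT = LogManager.getLogger("OMAudit");
  // Matches filter.write.marker=WRITE; with onMatch=NEUTRAL, level rules apply.
  private static final Marker WRITE = MarkerManager.getMarker("WRITE");

  private AuditLogSketch() { }

  public static void main(String[] args) {
    // INFO meets logger.audit.level=INFO, so this line lands in audit.log.
    AUDIT.info(WRITE, "user=test | op=CREATE_KEY | ret=SUCCESS");
  }
}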
+# +# log4j configuration used during build and unit tests + +log4j.rootLogger=INFO,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index 829c0d6ac362..e3d365b65051 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -106,43 +106,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> depcheck - - banned-rocksdb-imports - process-sources - - enforce - - - - - false - Use managed RocksObjects under org.apache.hadoop.hdds.utils.db.managed instead. - - org.rocksdb.** - - org.rocksdb.AbstractEventListener - org.rocksdb.Checkpoint - org.rocksdb.ColumnFamilyDescriptor - org.rocksdb.ColumnFamilyHandle - org.rocksdb.ColumnFamilyOptions - org.rocksdb.CompactionJobInfo - org.rocksdb.CompressionType - org.rocksdb.DBOptions - org.rocksdb.FlushOptions - org.rocksdb.LiveFileMetaData - org.rocksdb.Options - org.rocksdb.RocksDB - org.rocksdb.RocksDBException - org.rocksdb.SstFileReader - org.rocksdb.TableProperties - org.rocksdb.ReadOptions - org.rocksdb.SstFileReaderIterator - - org.apache.hadoop.hdds.utils.db.managed.* - - - - diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java index 4e7efc75fe9c..56c8bf411ec4 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java @@ -41,8 +41,7 @@ public static List getLiveSSTFilesForCFs( final ManagedRocksDB rocksDB, List cfs) { final Set cfSet = Sets.newHashSet(cfs); return rocksDB.get().getLiveFilesMetaData().stream() - .filter(lfm -> cfSet.contains( - StringUtils.bytes2String(lfm.columnFamilyName()))) + .filter(lfm -> cfSet.contains(StringUtils.bytes2String(lfm.columnFamilyName()))) .collect(Collectors.toList()); } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java index be949cd4fbdd..ea5060b22a4b 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java @@ -20,16 +20,15 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; import org.apache.hadoop.util.ClosableIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; -import org.rocksdb.ReadOptions; import 
org.rocksdb.RocksDBException; -import org.rocksdb.SstFileReader; -import org.rocksdb.SstFileReaderIterator; import java.io.IOException; import java.io.UncheckedIOException; @@ -37,9 +36,9 @@ import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; -import java.util.Optional; import java.util.Spliterator; import java.util.Spliterators; +import java.util.function.Function; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -78,7 +77,7 @@ public long getEstimatedTotalKeys() throws RocksDBException { try (ManagedOptions options = new ManagedOptions()) { for (String sstFile : sstFiles) { - try (SstFileReader fileReader = new SstFileReader(options)) { + try (ManagedSstFileReader fileReader = new ManagedSstFileReader(options)) { fileReader.open(sstFile); estimatedSize += fileReader.getTableProperties().getNumEntries(); } @@ -91,115 +90,100 @@ public long getEstimatedTotalKeys() throws RocksDBException { } public Stream getKeyStream(String lowerBound, - String upperBound) throws RocksDBException { + String upperBound) throws RocksDBException { // TODO: [SNAPSHOT] Check if default Options and ReadOptions is enough. - final MultipleSstFileIterator itr = - new MultipleSstFileIterator(sstFiles) { - private ManagedOptions options; - private ReadOptions readOptions; - - private ManagedSlice lowerBoundSLice; - - private ManagedSlice upperBoundSlice; - - @Override - protected void init() { - this.options = new ManagedOptions(); - this.readOptions = new ManagedReadOptions(); - if (Objects.nonNull(lowerBound)) { - this.lowerBoundSLice = new ManagedSlice( - StringUtils.string2Bytes(lowerBound)); - readOptions.setIterateLowerBound(lowerBoundSLice); - } - - if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(upperBound)); - readOptions.setIterateUpperBound(upperBoundSlice); - } - } + final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { + private ManagedOptions options; + private ManagedReadOptions readOptions; + + private ManagedSlice lowerBoundSLice; + + private ManagedSlice upperBoundSlice; + + @Override + protected void init() { + this.options = new ManagedOptions(); + this.readOptions = new ManagedReadOptions(); + if (Objects.nonNull(lowerBound)) { + this.lowerBoundSLice = new ManagedSlice( + StringUtils.string2Bytes(lowerBound)); + readOptions.setIterateLowerBound(lowerBoundSLice); + } - @Override - protected ClosableIterator getKeyIteratorForFile(String file) - throws RocksDBException { - return new ManagedSstFileIterator(file, options, readOptions) { - @Override - protected String getIteratorValue( - SstFileReaderIterator iterator) { - return new String(iterator.key(), UTF_8); - } - }; - } + if (Objects.nonNull(upperBound)) { + this.upperBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(upperBound)); + readOptions.setIterateUpperBound(upperBoundSlice); + } + } + @Override + protected ClosableIterator getKeyIteratorForFile(String file) throws RocksDBException { + return new ManagedSstFileIterator(file, options, readOptions) { @Override - public void close() throws UncheckedIOException { - super.close(); - options.close(); - readOptions.close(); - IOUtils.closeQuietly(lowerBoundSLice, upperBoundSlice); + protected String getIteratorValue(ManagedSstFileReaderIterator iterator) { + return new String(iterator.get().key(), UTF_8); } }; + } + + @Override + public void close() throws UncheckedIOException { + super.close(); + options.close(); + readOptions.close(); + 
IOUtils.closeQuietly(lowerBoundSLice, upperBoundSlice); + } + }; return getStreamFromIterator(itr); } - public Stream getKeyStreamWithTombstone( - ManagedSSTDumpTool sstDumpTool, String lowerBound, - String upperBound) throws RocksDBException { - final MultipleSstFileIterator itr = - new MultipleSstFileIterator(sstFiles) { - //TODO: [SNAPSHOT] Check if default Options is enough. - private ManagedOptions options; - private ManagedSlice lowerBoundSlice; - private ManagedSlice upperBoundSlice; - - @Override - protected void init() { - this.options = new ManagedOptions(); - if (Objects.nonNull(lowerBound)) { - this.lowerBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(lowerBound)); - } - if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(upperBound)); - } - } + public Stream getKeyStreamWithTombstone(String lowerBound, String upperBound) throws RocksDBException { + final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { + //TODO: [SNAPSHOT] Check if default Options is enough. + private ManagedOptions options; + private ManagedSlice lowerBoundSlice; + private ManagedSlice upperBoundSlice; + + @Override + protected void init() { + this.options = new ManagedOptions(); + if (Objects.nonNull(lowerBound)) { + this.lowerBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(lowerBound)); + } + if (Objects.nonNull(upperBound)) { + this.upperBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(upperBound)); + } + } - @Override - protected ClosableIterator getKeyIteratorForFile(String file) - throws IOException { - return new ManagedSSTDumpIterator(sstDumpTool, file, - options, lowerBoundSlice, upperBoundSlice) { - @Override - protected String getTransformedValue(Optional value) { - return value.map(v -> StringUtils.bytes2String(v.getKey())) - .orElse(null); - } - }; - } + @Override + protected ClosableIterator getKeyIteratorForFile(String file) { + return new ManagedRawSstFileIterator(file, options, lowerBoundSlice, upperBoundSlice, + keyValue -> StringUtils.bytes2String(keyValue.getKey())); + } - @Override - public void close() throws UncheckedIOException { - super.close(); - options.close(); - IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); - } - }; + @Override + public void close() throws UncheckedIOException { + super.close(); + options.close(); + IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); + } + }; return getStreamFromIterator(itr); } - private abstract static class ManagedSstFileIterator implements - ClosableIterator { - private SstFileReader fileReader; - private SstFileReaderIterator fileReaderIterator; + private abstract static class ManagedSstFileIterator implements ClosableIterator { + private final ManagedSstFileReader fileReader; + private final ManagedSstFileReaderIterator fileReaderIterator; - ManagedSstFileIterator(String path, ManagedOptions options, - ReadOptions readOptions) + ManagedSstFileIterator(String path, ManagedOptions options, ManagedReadOptions readOptions) throws RocksDBException { - this.fileReader = new SstFileReader(options); + this.fileReader = new ManagedSstFileReader(options); this.fileReader.open(path); - this.fileReaderIterator = fileReader.newIterator(readOptions); - fileReaderIterator.seekToFirst(); + this.fileReaderIterator = ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions)); + fileReaderIterator.get().seekToFirst(); } @Override @@ -210,21 +194,48 @@ public void close() { @Override public boolean hasNext() { - return 
fileReaderIterator.isValid(); + return fileReaderIterator.get().isValid(); } - protected abstract String getIteratorValue(SstFileReaderIterator iterator); + protected abstract String getIteratorValue(ManagedSstFileReaderIterator iterator); @Override public String next() { String value = getIteratorValue(fileReaderIterator); - fileReaderIterator.next(); + fileReaderIterator.get().next(); return value; } } - private abstract static class MultipleSstFileIterator implements - ClosableIterator { + private static class ManagedRawSstFileIterator implements ClosableIterator { + private final ManagedRawSSTFileReader fileReader; + private final ManagedRawSSTFileIterator fileReaderIterator; + private static final int READ_AHEAD_SIZE = 2 * 1024 * 1024; + + ManagedRawSstFileIterator(String path, ManagedOptions options, ManagedSlice lowerBound, ManagedSlice upperBound, + Function keyValueFunction) { + this.fileReader = new ManagedRawSSTFileReader<>(options, path, READ_AHEAD_SIZE); + this.fileReaderIterator = fileReader.newIterator(keyValueFunction, lowerBound, upperBound); + } + + @Override + public void close() { + this.fileReaderIterator.close(); + this.fileReader.close(); + } + + @Override + public boolean hasNext() { + return fileReaderIterator.hasNext(); + } + + @Override + public String next() { + return fileReaderIterator.next(); + } + } + + private abstract static class MultipleSstFileIterator implements ClosableIterator { private final Iterator fileNameIterator; @@ -238,16 +249,13 @@ private MultipleSstFileIterator(Collection files) { protected abstract void init(); - protected abstract ClosableIterator getKeyIteratorForFile(String file) - throws RocksDBException, - IOException; + protected abstract ClosableIterator getKeyIteratorForFile(String file) throws RocksDBException, IOException; @Override public boolean hasNext() { try { do { - if (Objects.nonNull(currentFileIterator) && - currentFileIterator.hasNext()) { + if (Objects.nonNull(currentFileIterator) && currentFileIterator.hasNext()) { return true; } } while (moveToNextFile()); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index 97d015fb2392..08a013fc7c70 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -30,7 +30,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.commons.collections.CollectionUtils; @@ -41,10 +40,13 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionLogEntryProto; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.Scheduler; +import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; import 
org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; import org.apache.ozone.rocksdb.util.RdbUtil; @@ -53,13 +55,9 @@ import org.rocksdb.AbstractEventListener; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.CompactionJobInfo; -import org.rocksdb.DBOptions; import org.rocksdb.LiveFileMetaData; -import org.rocksdb.Options; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; -import org.rocksdb.SstFileReader; -import org.rocksdb.SstFileReaderIterator; import org.rocksdb.TableProperties; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -171,12 +169,11 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, = new BootstrapStateHandler.Lock(); private ColumnFamilyHandle snapshotInfoTableCFHandle; - private final AtomicInteger tarballRequestCount; private static final String DAG_PRUNING_SERVICE_NAME = "CompactionDagPruningService"; private AtomicBoolean suspended; private ColumnFamilyHandle compactionLogTableCFHandle; - private RocksDB activeRocksDB; + private ManagedRocksDB activeRocksDB; /** * For snapshot diff calculation we only need to track following column @@ -248,7 +245,6 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, } else { this.scheduler = null; } - this.tarballRequestCount = new AtomicInteger(0); } private String createCompactionLogDir(String metadataDirName, @@ -301,7 +297,7 @@ private void createSstBackUpDir() { } @Override - public void close() throws Exception { + public void close() { if (!closed) { synchronized (this) { if (!closed) { @@ -348,32 +344,11 @@ public static void addDebugLevel(Integer level) { DEBUG_LEVEL.add(level); } - /** - * Takes {@link org.rocksdb.Options}. - */ - public void setRocksDBForCompactionTracking(Options rocksOptions, - List list) { - list.add(newCompactionBeginListener()); - list.add(newCompactionCompletedListener()); - rocksOptions.setListeners(list); - } - - public void setRocksDBForCompactionTracking(Options rocksOptions) { - setRocksDBForCompactionTracking(rocksOptions, new ArrayList<>()); - } - - /** - * Takes {@link org.rocksdb.DBOptions}. - */ - public void setRocksDBForCompactionTracking(DBOptions rocksOptions, - List list) { - list.add(newCompactionBeginListener()); - list.add(newCompactionCompletedListener()); - rocksOptions.setListeners(list); - } - - public void setRocksDBForCompactionTracking(DBOptions rocksOptions) { - setRocksDBForCompactionTracking(rocksOptions, new ArrayList<>()); + public void setRocksDBForCompactionTracking(ManagedDBOptions rocksOptions) { + List events = new ArrayList<>(); + events.add(newCompactionBeginListener()); + events.add(newCompactionCompletedListener()); + rocksOptions.setListeners(events); } /** @@ -402,7 +377,7 @@ public synchronized void setCompactionLogTableCFHandle( * Set activeRocksDB to access CompactionLogTable. * @param activeRocksDB RocksDB */ - public synchronized void setActiveRocksDB(RocksDB activeRocksDB) { + public synchronized void setActiveRocksDB(ManagedRocksDB activeRocksDB) { Preconditions.checkNotNull(activeRocksDB, "RocksDB should not be null."); this.activeRocksDB = activeRocksDB; } @@ -435,8 +410,7 @@ private boolean isSnapshotInfoTableEmpty(RocksDB db) { // Note the goal of compaction DAG is to track all compactions that happened // _after_ a DB checkpoint is taken. 
- try (ManagedRocksIterator it = ManagedRocksIterator.managed( - db.newIterator(snapshotInfoTableCFHandle))) { + try (ManagedRocksIterator it = ManagedRocksIterator.managed(db.newIterator(snapshotInfoTableCFHandle))) { it.get().seekToFirst(); return !it.get().isValid(); } @@ -498,7 +472,6 @@ public void onCompactionBegin(RocksDB db, }; } - private AbstractEventListener newCompactionCompletedListener() { return new AbstractEventListener() { @Override @@ -541,8 +514,6 @@ public void onCompactionCompleted(RocksDB db, return; } - waitForTarballCreation(); - // Add the compaction log entry to Compaction log table. addToCompactionLogTable(compactionLogEntry); @@ -576,29 +547,13 @@ void addToCompactionLogTable(CompactionLogEntry compactionLogEntry) { byte[] key = keyString.getBytes(UTF_8); byte[] value = compactionLogEntry.getProtobuf().toByteArray(); try { - activeRocksDB.put(compactionLogTableCFHandle, key, value); + activeRocksDB.get().put(compactionLogTableCFHandle, key, value); } catch (RocksDBException exception) { // TODO: Revisit exception handling before merging the PR. throw new RuntimeException(exception); } } - /** - * Check if there is any in_progress tarball creation request and wait till - * all tarball creation finish, and it gets notified. - */ - private void waitForTarballCreation() { - while (tarballRequestCount.get() != 0) { - try { - wait(Integer.MAX_VALUE); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.error("Compaction log thread {} is interrupted.", - Thread.currentThread().getName()); - } - } - } - /** * Creates a hard link between provided link and source. * It doesn't throw any exception if {@link Files#createLink} throws @@ -629,9 +584,8 @@ private long getSSTFileSummary(String filename) filename += SST_FILE_EXTENSION; } - try ( - ManagedOptions option = new ManagedOptions(); - SstFileReader reader = new SstFileReader(option)) { + try (ManagedOptions option = new ManagedOptions(); + ManagedSstFileReader reader = new ManagedSstFileReader(option)) { reader.open(getAbsoluteSstFilePath(filename)); @@ -801,7 +755,7 @@ public void loadAllCompactionLogs() { preconditionChecksForLoadAllCompactionLogs(); addEntriesFromLogFilesToDagAndCompactionLogTable(); try (ManagedRocksIterator managedRocksIterator = new ManagedRocksIterator( - activeRocksDB.newIterator(compactionLogTableCFHandle))) { + activeRocksDB.get().newIterator(compactionLogTableCFHandle))) { managedRocksIterator.get().seekToFirst(); while (managedRocksIterator.get().isValid()) { byte[] value = managedRocksIterator.get().value(); @@ -1252,7 +1206,7 @@ private synchronized Pair, List> getOlderFileNodes() { List keysToRemove = new ArrayList<>(); try (ManagedRocksIterator managedRocksIterator = new ManagedRocksIterator( - activeRocksDB.newIterator(compactionLogTableCFHandle))) { + activeRocksDB.get().newIterator(compactionLogTableCFHandle))) { managedRocksIterator.get().seekToFirst(); while (managedRocksIterator.get().isValid()) { CompactionLogEntry compactionLogEntry = CompactionLogEntry @@ -1282,7 +1236,7 @@ private synchronized void removeKeyFromCompactionLogTable( List keysToRemove) { try { for (byte[] key: keysToRemove) { - activeRocksDB.delete(compactionLogTableCFHandle, key); + activeRocksDB.get().delete(compactionLogTableCFHandle, key); } } catch (RocksDBException exception) { // TODO Handle exception properly before merging the PR. 
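For reference, a minimal sketch (not part of this patch) of the ManagedRocksDB pattern the hunks above switch to: the wrapper owns the native handle, and callers reach the underlying org.rocksdb.RocksDB through get(). The database path, key and value bytes below are hypothetical; the calls themselves (ManagedDBOptions, ManagedColumnFamilyOptions, ManagedRocksDB.open, get().put, close) are the ones used in this diff.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class ManagedRocksDbSketch {
  // Opens a hypothetical DB, writes one entry through the managed wrapper,
  // and releases the native handles explicitly.
  static void writeOneEntry(byte[] key, byte[] value) throws RocksDBException {
    try (ManagedColumnFamilyOptions cfOpts = new ManagedColumnFamilyOptions();
         ManagedDBOptions dbOptions = new ManagedDBOptions()) {
      dbOptions.setCreateIfMissing(true);
      dbOptions.setCreateMissingColumnFamilies(true);
      List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
      cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
      List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
      ManagedRocksDB db = ManagedRocksDB.open(dbOptions, "/tmp/example-db", cfDescriptors, cfHandles);
      try {
        // get() exposes the wrapped RocksDB, mirroring activeRocksDB.get().put(...) above.
        db.get().put(cfHandles.get(0), key, value);
      } finally {
        cfHandles.forEach(ColumnFamilyHandle::close);
        db.close();
      }
    }
  }
}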
@@ -1449,28 +1403,10 @@ public void pruneSstFiles() { } } - public void incrementTarballRequestCount() { - tarballRequestCount.incrementAndGet(); - } - - public void decrementTarballRequestCountAndNotify() { - // Synchronized block is used to ensure that lock is on the same instance notifyAll is being called. - synchronized (this) { - tarballRequestCount.decrementAndGet(); - // Notify compaction threads to continue. - notifyAll(); - } - } - public boolean shouldRun() { return !suspended.get(); } - @VisibleForTesting - public int getTarballRequestCount() { - return tarballRequestCount.get(); - } - @VisibleForTesting public boolean debugEnabled(Integer level) { return DEBUG_LEVEL.contains(level); @@ -1575,18 +1511,19 @@ private CompactionFileInfo toFileInfo(String sstFile, CompactionFileInfo.Builder fileInfoBuilder = new CompactionFileInfo.Builder(fileName); - try (SstFileReader fileReader = new SstFileReader(options)) { + try (ManagedSstFileReader fileReader = new ManagedSstFileReader(options)) { fileReader.open(sstFile); - String columnFamily = StringUtils.bytes2String( - fileReader.getTableProperties().getColumnFamilyName()); - SstFileReaderIterator iterator = fileReader.newIterator(readOptions); - iterator.seekToFirst(); - String startKey = StringUtils.bytes2String(iterator.key()); - iterator.seekToLast(); - String endKey = StringUtils.bytes2String(iterator.key()); - fileInfoBuilder.setStartRange(startKey) - .setEndRange(endKey) - .setColumnFamily(columnFamily); + String columnFamily = StringUtils.bytes2String(fileReader.getTableProperties().getColumnFamilyName()); + try (ManagedSstFileReaderIterator iterator = + ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions))) { + iterator.get().seekToFirst(); + String startKey = StringUtils.bytes2String(iterator.get().key()); + iterator.get().seekToLast(); + String endKey = StringUtils.bytes2String(iterator.get().key()); + fileInfoBuilder.setStartRange(startKey) + .setEndRange(endKey) + .setColumnFamily(columnFamily); + } } catch (RocksDBException rocksDBException) { // Ideally it should not happen. If it does just log the exception. // And let the compaction complete without the exception. 
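Similarly, a minimal sketch (not part of this patch) of the managed SST-reader pattern applied in toFileInfo() above and in RocksDiffUtils below: ManagedSstFileReader replaces the raw SstFileReader, and the iterator is wrapped with ManagedSstFileReaderIterator.managed(...) so both native objects are released by try-with-resources. The SST file path is hypothetical, and the file is assumed to be non-empty, as toFileInfo() assumes.

import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator;
import org.rocksdb.RocksDBException;

final class SstKeyRangeSketch {
  // Prints the column family and the first/last keys of one SST file.
  static void printKeyRange(String sstFilePath) throws RocksDBException {
    try (ManagedOptions options = new ManagedOptions();
         ManagedReadOptions readOptions = new ManagedReadOptions();
         ManagedSstFileReader reader = new ManagedSstFileReader(options)) {
      reader.open(sstFilePath);
      String columnFamily = StringUtils.bytes2String(reader.getTableProperties().getColumnFamilyName());
      try (ManagedSstFileReaderIterator iterator =
               ManagedSstFileReaderIterator.managed(reader.newIterator(readOptions))) {
        iterator.get().seekToFirst();
        String startKey = StringUtils.bytes2String(iterator.get().key());
        iterator.get().seekToLast();
        String endKey = StringUtils.bytes2String(iterator.get().key());
        System.out.println(columnFamily + ": [" + startKey + ", " + endKey + "]");
      }
    }
  }
}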
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java index 5ddcf8b7e6af..e116868410f1 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; -import org.rocksdb.SstFileReader; import org.rocksdb.TableProperties; import org.rocksdb.RocksDBException; import org.slf4j.Logger; @@ -90,9 +89,9 @@ public static boolean doesSstFileContainKeyRange(String filepath, try ( ManagedOptions options = new ManagedOptions(); - ManagedSstFileReader sstFileReader = ManagedSstFileReader.managed(new SstFileReader(options))) { - sstFileReader.get().open(filepath); - TableProperties properties = sstFileReader.get().getTableProperties(); + ManagedSstFileReader sstFileReader = new ManagedSstFileReader(options)) { + sstFileReader.open(filepath); + TableProperties properties = sstFileReader.getTableProperties(); String tableName = new String(properties.getColumnFamilyName(), UTF_8); if (tableToPrefixMap.containsKey(tableName)) { String prefix = tableToPrefixMap.get(tableName); @@ -100,7 +99,7 @@ public static boolean doesSstFileContainKeyRange(String filepath, try ( ManagedReadOptions readOptions = new ManagedReadOptions(); ManagedSstFileReaderIterator iterator = ManagedSstFileReaderIterator.managed( - sstFileReader.get().newIterator(readOptions))) { + sstFileReader.newIterator(readOptions))) { iterator.get().seek(prefix.getBytes(UTF_8)); String seekResultKey = new String(iterator.get().key(), UTF_8); return seekResultKey.startsWith(prefix); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java new file mode 100644 index 000000000000..8031eca7b0db --- /dev/null +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ozone.graph; + +import com.google.common.graph.MutableGraph; +import org.apache.ozone.rocksdiff.CompactionNode; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + +/** + * This class is used for testing the PrintableGraph class. + * It contains methods to test the generation and printing of graphs with different types. + */ +@ExtendWith(MockitoExtension.class) +public class TestPrintableGraph { + @TempDir + private Path dir; + + @Mock + private MutableGraph mutableGraph; + + @ParameterizedTest + @EnumSource(PrintableGraph.GraphType.class) + void testPrintNoGraphMessage(PrintableGraph.GraphType graphType) { + PrintableGraph graph = new PrintableGraph(mutableGraph, graphType); + try { + graph.generateImage(dir.resolve(graphType.name()).toString()); + } catch (IOException e) { + assertEquals("Graph is empty.", e.getMessage()); + } + } + + @ParameterizedTest + @EnumSource(PrintableGraph.GraphType.class) + void testPrintActualGraph(PrintableGraph.GraphType graphType) throws IOException { + Set nodes = Stream.of( + new CompactionNode("fileName1", + 100, 100, "startKey1", "endKey1", "columnFamily1"), + new CompactionNode("fileName2", + 200, 200, "startKey2", "endKey2", null), + new CompactionNode("fileName3", + 300, 300, null, "endKey3", "columnFamily3"), + new CompactionNode("fileName4", + 400, 400, "startKey4", null, "columnFamily4") + ).collect(Collectors.toSet()); + when(mutableGraph.nodes()).thenReturn(nodes); + + PrintableGraph graph = new PrintableGraph(mutableGraph, graphType); + graph.generateImage(dir.resolve(graphType.name()).toString()); + + assertTrue(Files.exists(dir.resolve(graphType.name())), "Graph hasn't been generated"); + } +} diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java index edc491e7c8da..1031992f3b5d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java @@ -17,18 +17,15 @@ */ package org.apache.ozone.rocksdb.util; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.TestUtils; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.apache.ozone.test.tag.Native; -import 
org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -42,10 +39,6 @@ import java.util.Optional; import java.util.SortedMap; import java.util.TreeMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -164,51 +157,38 @@ public void testGetKeyStream(int numberOfFiles) @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @ParameterizedTest @ValueSource(ints = {0, 1, 2, 3, 7, 10}) - @Unhealthy("HDDS-9274") public void testGetKeyStreamWithTombstone(int numberOfFiles) throws RocksDBException, IOException, NativeLibraryNotLoadedException { - assumeTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + assumeTrue(ManagedRawSSTFileReader.loadLibrary()); Pair, List> data = createDummyData(numberOfFiles); List files = data.getRight(); SortedMap keys = data.getLeft(); - ExecutorService executorService = new ThreadPoolExecutor(0, - 2, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder() - .setNameFormat("snapshot-diff-manager-sst-dump-tool-TID-%d") - .build(), new ThreadPoolExecutor.DiscardPolicy()); - ManagedSSTDumpTool sstDumpTool = - new ManagedSSTDumpTool(executorService, 256); // Getting every possible combination of 2 elements from the sampled keys. // Reading the sst file lying within the given bounds and // validating the keys read from the sst file. List> bounds = TestUtils.getTestingBounds(keys); - try { - for (Optional lowerBound : bounds) { - for (Optional upperBound : bounds) { - // Calculating the expected keys which lie in the given boundary. - Map keysInBoundary = - keys.entrySet().stream().filter(entry -> lowerBound - .map(l -> entry.getKey().compareTo(l) >= 0) - .orElse(true) && - upperBound.map(u -> entry.getKey().compareTo(u) < 0) - .orElse(true)) - .collect(Collectors.toMap(Map.Entry::getKey, - Map.Entry::getValue)); - try (Stream keyStream = new SstFileSetReader(files) - .getKeyStreamWithTombstone(sstDumpTool, lowerBound.orElse(null), - upperBound.orElse(null))) { - keyStream.forEach( - key -> { - assertNotNull(keysInBoundary.remove(key)); - }); - } - assertEquals(0, keysInBoundary.size()); + for (Optional lowerBound : bounds) { + for (Optional upperBound : bounds) { + // Calculating the expected keys which lie in the given boundary. 
+ Map keysInBoundary = + keys.entrySet().stream().filter(entry -> lowerBound + .map(l -> entry.getKey().compareTo(l) >= 0) + .orElse(true) && + upperBound.map(u -> entry.getKey().compareTo(u) < 0) + .orElse(true)) + .collect(Collectors.toMap(Map.Entry::getKey, + Map.Entry::getValue)); + try (Stream keyStream = new SstFileSetReader(files) + .getKeyStreamWithTombstone(lowerBound.orElse(null), + upperBound.orElse(null))) { + keyStream.forEach( + key -> { + assertNotNull(keysInBoundary.remove(key)); + }); } + assertEquals(0, keysInBoundary.size()); } - } finally { - executorService.shutdown(); } } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index b01e4cc2e307..0164e3a23bd5 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -56,9 +56,14 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedCheckpoint; +import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedFlushOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; @@ -70,22 +75,16 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.rocksdb.Checkpoint; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.FlushOptions; import org.rocksdb.LiveFileMetaData; -import org.rocksdb.Options; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; -import org.rocksdb.RocksIterator; -import org.rocksdb.SstFileReader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; +import static org.apache.hadoop.hdds.StringUtils.bytes2String; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL; @@ -137,7 +136,9 @@ public class TestRocksDBCheckpointDiffer { private ConfigurationSource config; private ExecutorService executorService = Executors.newCachedThreadPool(); private RocksDBCheckpointDiffer rocksDBCheckpointDiffer; - private RocksDB activeRocksDB; + private ManagedRocksDB activeRocksDB; + + private ManagedDBOptions dbOptions; private ColumnFamilyHandle keyTableCFHandle; private ColumnFamilyHandle directoryTableCFHandle; private ColumnFamilyHandle 
fileTableCFHandle; @@ -180,17 +181,16 @@ public void init() throws RocksDBException { ACTIVE_DB_DIR_NAME, config); - ColumnFamilyOptions cfOpts = new ColumnFamilyOptions() - .optimizeUniversalStyleCompaction(); + ManagedColumnFamilyOptions cfOpts = new ManagedColumnFamilyOptions(); + cfOpts.optimizeUniversalStyleCompaction(); List cfDescriptors = getCFDescriptorList(cfOpts); List cfHandles = new ArrayList<>(); - DBOptions dbOptions = new DBOptions() - .setCreateIfMissing(true) - .setCreateMissingColumnFamilies(true); + dbOptions = new ManagedDBOptions(); + dbOptions.setCreateIfMissing(true); + dbOptions.setCreateMissingColumnFamilies(true); rocksDBCheckpointDiffer.setRocksDBForCompactionTracking(dbOptions); - activeRocksDB = RocksDB.open(dbOptions, ACTIVE_DB_DIR_NAME, cfDescriptors, - cfHandles); + activeRocksDB = ManagedRocksDB.open(dbOptions, ACTIVE_DB_DIR_NAME, cfDescriptors, cfHandles); keyTableCFHandle = cfHandles.get(1); directoryTableCFHandle = cfHandles.get(2); fileTableCFHandle = cfHandles.get(3); @@ -599,12 +599,12 @@ void diffAllSnapshots(RocksDBCheckpointDiffer differ) /** * Helper function that creates an RDB checkpoint (= Ozone snapshot). */ - private void createCheckpoint(RocksDB rocksDB) throws RocksDBException { + private void createCheckpoint(ManagedRocksDB rocksDB) throws RocksDBException { LOG.trace("Current time: " + System.currentTimeMillis()); long t1 = System.currentTimeMillis(); - final long snapshotGeneration = rocksDB.getLatestSequenceNumber(); + final long snapshotGeneration = rocksDB.get().getLatestSequenceNumber(); final String cpPath = CP_PATH_PREFIX + snapshotGeneration; // Delete the checkpoint dir if it already exists for the test @@ -630,12 +630,12 @@ private void createCheckpoint(RocksDB rocksDB) throws RocksDBException { } // Flushes the WAL and Creates a RocksDB checkpoint - void createCheckPoint(String dbPathArg, String cpPathArg, RocksDB rocksDB) { + void createCheckPoint(String dbPathArg, String cpPathArg, ManagedRocksDB rocksDB) { LOG.debug("Creating RocksDB '{}' checkpoint at '{}'", dbPathArg, cpPathArg); - try { - rocksDB.flush(new FlushOptions()); - Checkpoint cp = Checkpoint.create(rocksDB); - cp.createCheckpoint(cpPathArg); + try (ManagedFlushOptions flushOptions = new ManagedFlushOptions()) { + rocksDB.get().flush(flushOptions); + ManagedCheckpoint cp = ManagedCheckpoint.create(rocksDB); + cp.get().createCheckpoint(cpPathArg); } catch (RocksDBException e) { throw new RuntimeException(e.getMessage()); } @@ -653,7 +653,7 @@ void printAllSnapshots() { * @return List of ColumnFamilyDescriptor */ static List getCFDescriptorList( - ColumnFamilyOptions cfOpts) { + ManagedColumnFamilyOptions cfOpts) { return asList( new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts), new ColumnFamilyDescriptor("keyTable".getBytes(UTF_8), cfOpts), @@ -670,7 +670,7 @@ private void writeKeysAndCheckpointing() throws RocksDBException { String valueStr = "Val-" + i + "-" + generatedString; byte[] key = keyStr.getBytes(UTF_8); // Put entry in keyTable - activeRocksDB.put(keyTableCFHandle, key, valueStr.getBytes(UTF_8)); + activeRocksDB.get().put(keyTableCFHandle, key, valueStr.getBytes(UTF_8)); if (i % SNAPSHOT_EVERY_SO_MANY_KEYS == 0) { createCheckpoint(activeRocksDB); } @@ -690,33 +690,37 @@ private boolean deleteDirectory(File directoryToBeDeleted) { return directoryToBeDeleted.delete(); } + public List getColumnFamilyDescriptors(String dbPath) throws RocksDBException { + try (ManagedOptions emptyOptions = new ManagedOptions()) { + List cfList = 
RocksDB.listColumnFamilies(emptyOptions, dbPath); + return cfList.stream().map(ColumnFamilyDescriptor::new).collect(Collectors.toList()); + } + } + // Read from a given RocksDB instance and optionally write all the // keys to a given file. private void readRocksDBInstance(String dbPathArg, - RocksDB rocksDB, + ManagedRocksDB rocksDB, FileWriter file, RocksDBCheckpointDiffer differ) { LOG.debug("Reading RocksDB: " + dbPathArg); boolean createdDB = false; - try (Options options = new Options() - .setParanoidChecks(true) - .setForceConsistencyChecks(false)) { - + try (ManagedDBOptions dbOptions = new ManagedDBOptions()) { + List cfDescriptors = getColumnFamilyDescriptors(dbPathArg); + List cfHandles = new ArrayList<>(); if (rocksDB == null) { - rocksDB = RocksDB.openReadOnly(options, dbPathArg); + rocksDB = ManagedRocksDB.openReadOnly(dbOptions, dbPathArg, cfDescriptors, cfHandles); createdDB = true; } - List liveFileMetaDataList = - rocksDB.getLiveFilesMetaData(); + List liveFileMetaDataList = rocksDB.get().getLiveFilesMetaData(); for (LiveFileMetaData m : liveFileMetaDataList) { LOG.debug("SST File: {}. ", m.fileName()); LOG.debug("\tLevel: {}", m.level()); - LOG.debug("\tTable: {}", toStr(m.columnFamilyName())); - LOG.debug("\tKey Range: {}", toStr(m.smallestKey()) - + " <-> " + toStr(m.largestKey())); + LOG.debug("\tTable: {}", bytes2String(m.columnFamilyName())); + LOG.debug("\tKey Range: {}", bytes2String(m.smallestKey()) + " <-> " + bytes2String(m.largestKey())); if (differ.debugEnabled(DEBUG_DAG_LIVE_NODES)) { printMutableGraphFromAGivenNode( differ.getCompactionNodeMap(), @@ -726,19 +730,20 @@ private void readRocksDBInstance(String dbPathArg, } if (differ.debugEnabled(DEBUG_READ_ALL_DB_KEYS)) { - RocksIterator iter = rocksDB.newIterator(); - for (iter.seekToFirst(); iter.isValid(); iter.next()) { - LOG.debug("Iterator key:" + toStr(iter.key()) + ", " + - "iter value:" + toStr(iter.value())); - if (file != null) { - file.write("iterator key:" + toStr(iter.key()) + ", iter " + - "value:" + toStr(iter.value())); - file.write("\n"); + try (ManagedRocksIterator iter = new ManagedRocksIterator(rocksDB.get().newIterator())) { + for (iter.get().seekToFirst(); iter.get().isValid(); iter.get().next()) { + LOG.debug( + "Iterator key:" + bytes2String(iter.get().key()) + ", iter value:" + bytes2String(iter.get().value())); + if (file != null) { + file.write("iterator key:" + bytes2String(iter.get().key()) + ", iter value:" + + bytes2String(iter.get().value())); + file.write("\n"); + } } } } } catch (IOException | RocksDBException e) { - e.printStackTrace(); + LOG.error("Caught exception while reading from rocksDB.", e); } finally { if (createdDB) { rocksDB.close(); @@ -746,13 +751,6 @@ private void readRocksDBInstance(String dbPathArg, } } - /** - * Return String object encoded in UTF-8 from a byte array. - */ - private String toStr(byte[] bytes) { - return new String(bytes, UTF_8); - } - /** * Helper that traverses the graphs for testing. 
* @param compactionNodeMap @@ -1322,7 +1320,7 @@ public void testPruneOlderSnapshotsWithCompactionHistory( private int countEntriesInCompactionLogTable() { try (ManagedRocksIterator iterator = new ManagedRocksIterator( - activeRocksDB.newIterator(compactionLogTableCFHandle))) { + activeRocksDB.get().newIterator(compactionLogTableCFHandle))) { iterator.get().seekToFirst(); int count = 0; while (iterator.get().isValid()) { @@ -1842,14 +1840,16 @@ private void createKeys(ColumnFamilyHandle cfh, String valuePrefix, int numberOfKeys) throws RocksDBException { - for (int i = 0; i < numberOfKeys; ++i) { - String generatedString = RandomStringUtils.randomAlphabetic(7); - String keyStr = keyPrefix + i + "-" + generatedString; - String valueStr = valuePrefix + i + "-" + generatedString; - byte[] key = keyStr.getBytes(UTF_8); - activeRocksDB.put(cfh, key, valueStr.getBytes(UTF_8)); - if (i % 10 == 0) { - activeRocksDB.flush(new FlushOptions(), cfh); + try (ManagedFlushOptions flushOptions = new ManagedFlushOptions()) { + for (int i = 0; i < numberOfKeys; ++i) { + String generatedString = RandomStringUtils.randomAlphabetic(7); + String keyStr = keyPrefix + i + "-" + generatedString; + String valueStr = valuePrefix + i + "-" + generatedString; + byte[] key = keyStr.getBytes(UTF_8); + activeRocksDB.get().put(cfh, key, valueStr.getBytes(UTF_8)); + if (i % 10 == 0) { + activeRocksDB.get().flush(flushOptions, cfh); + } } } } @@ -1886,10 +1886,9 @@ public void testDagOnlyContainsDesiredCfh() Stream pathStream = Files.list( Paths.get(rocksDBCheckpointDiffer.getSSTBackupDir()))) { pathStream.forEach(path -> { - try (SstFileReader fileReader = new SstFileReader(options)) { + try (ManagedSstFileReader fileReader = new ManagedSstFileReader(options)) { fileReader.open(path.toAbsolutePath().toString()); - String columnFamily = StringUtils.bytes2String( - fileReader.getTableProperties().getColumnFamilyName()); + String columnFamily = bytes2String(fileReader.getTableProperties().getColumnFamilyName()); assertThat(COLUMN_FAMILIES_TO_TRACK_IN_DAG).contains(columnFamily); } catch (RocksDBException rocksDBException) { fail("Failed to read file: " + path.toAbsolutePath()); diff --git a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml index 50f349186089..dc08720c9687 100644 --- a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml @@ -51,4 +51,9 @@ + + + + + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java new file mode 100644 index 000000000000..0778b9a30dc3 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm; + +import com.fasterxml.jackson.core.JsonEncoding; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.reflect.Array; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.Set; +import javax.management.AttributeNotFoundException; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.QueryExp; +import javax.management.ReflectionException; +import javax.management.RuntimeErrorException; +import javax.management.RuntimeMBeanException; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.TabularData; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Class used to fetch metrics from MBeanServer. + */ +public class FetchMetrics { + private static final Logger LOG = LoggerFactory.getLogger(FetchMetrics.class); + private transient MBeanServer mBeanServer; + private transient JsonFactory jsonFactory; + + public FetchMetrics() { + this.mBeanServer = ManagementFactory.getPlatformMBeanServer(); + this.jsonFactory = new JsonFactory(); + } + + public String getMetrics(String qry) { + try { + JsonGenerator jg = null; + ByteArrayOutputStream opStream = new ByteArrayOutputStream(); + + try { + jg = this.jsonFactory.createGenerator(opStream, JsonEncoding.UTF8); + jg.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET); + jg.useDefaultPrettyPrinter(); + jg.writeStartObject(); + if (qry == null) { + qry = "*:*"; + } + this.listBeans(jg, new ObjectName(qry)); + } finally { + if (jg != null) { + jg.close(); + } + } + return new String(opStream.toByteArray(), StandardCharsets.UTF_8); + } catch (IOException | MalformedObjectNameException ex) { + LOG.error("Caught an exception while processing getMetrics request", ex); + } + return null; + } + + private void listBeans(JsonGenerator jg, ObjectName qry) + throws IOException { + LOG.debug("Listing beans for " + qry); + Set names = null; + names = this.mBeanServer.queryNames(qry, (QueryExp) null); + jg.writeArrayFieldStart("beans"); + Iterator it = names.iterator(); + + while (it.hasNext()) { + ObjectName oname = (ObjectName) it.next(); + String code = ""; + + MBeanInfo minfo; + try { + minfo = this.mBeanServer.getMBeanInfo(oname); + code = minfo.getClassName(); + String prs = ""; + + try { + if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) { + prs = "modelerType"; + code = (String) this.mBeanServer.getAttribute(oname, prs); + } + } catch (AttributeNotFoundException | MBeanException | RuntimeException | ReflectionException ex) { + LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", ex); + } + } catch (InstanceNotFoundException var17) { + 
continue; + } catch (IntrospectionException | ReflectionException ex) { + LOG.error("Problem while trying to process JMX query: " + qry + " with MBean " + oname, ex); + continue; + } + jg.writeStartObject(); + jg.writeStringField("name", oname.toString()); + jg.writeStringField("modelerType", code); + MBeanAttributeInfo[] attrs = minfo.getAttributes(); + for (int i = 0; i < attrs.length; ++i) { + this.writeAttribute(jg, oname, attrs[i]); + } + jg.writeEndObject(); + } + jg.writeEndArray(); + } + + private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeInfo attr) throws IOException { + if (attr.isReadable()) { + String attName = attr.getName(); + if (!"modelerType".equals(attName)) { + if (attName.indexOf("=") < 0 && attName.indexOf(":") < 0 && attName.indexOf(" ") < 0) { + Object value = null; + + try { + value = this.mBeanServer.getAttribute(oname, attName); + } catch (RuntimeMBeanException var7) { + if (var7.getCause() instanceof UnsupportedOperationException) { + LOG.debug("getting attribute " + attName + " of " + oname + " threw an exception", var7); + } else { + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", var7); + } + return; + } catch (RuntimeErrorException var8) { + LOG.error("getting attribute {} of {} threw an exception", new Object[]{attName, oname, var8}); + return; + } catch (MBeanException | RuntimeException | ReflectionException ex) { + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", ex); + return; + } catch (AttributeNotFoundException | InstanceNotFoundException ex) { + return; + } + this.writeAttribute(jg, attName, value); + } + } + } + } + + private void writeAttribute(JsonGenerator jg, String attName, Object value) throws IOException { + jg.writeFieldName(attName); + this.writeObject(jg, value); + } + + private void writeObject(JsonGenerator jg, Object value) throws IOException { + if (value == null) { + jg.writeNull(); + } else { + Class c = value.getClass(); + Object entry; + if (c.isArray()) { + jg.writeStartArray(); + int len = Array.getLength(value); + + for (int j = 0; j < len; ++j) { + entry = Array.get(value, j); + this.writeObject(jg, entry); + } + + jg.writeEndArray(); + } else if (value instanceof Number) { + Number n = (Number) value; + jg.writeNumber(n.toString()); + } else if (value instanceof Boolean) { + Boolean b = (Boolean) value; + jg.writeBoolean(b); + } else if (value instanceof CompositeData) { + CompositeData cds = (CompositeData) value; + CompositeType comp = cds.getCompositeType(); + Set keys = comp.keySet(); + jg.writeStartObject(); + Iterator var7 = keys.iterator(); + + while (var7.hasNext()) { + String key = (String) var7.next(); + this.writeAttribute(jg, key, cds.get(key)); + } + + jg.writeEndObject(); + } else if (value instanceof TabularData) { + TabularData tds = (TabularData) value; + jg.writeStartArray(); + Iterator var14 = tds.values().iterator(); + + while (var14.hasNext()) { + entry = var14.next(); + this.writeObject(jg, entry); + } + jg.writeEndArray(); + } else { + jg.writeString(value.toString()); + } + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java index 7b5cbe9f21fc..f47abe65befd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java @@ -342,10 +342,13 @@ private static void blockTillTaskStop(Thread balancingThread) { // NOTE: join should be called outside the lock in hierarchy // to avoid locking others waiting // wait for balancingThread to die with interrupt - balancingThread.interrupt(); LOG.info("Container Balancer waiting for {} to stop", balancingThread); try { - balancingThread.join(); + while (balancingThread.isAlive()) { + // retry interrupt every 5ms to avoid waiting when thread is sleeping + balancingThread.interrupt(); + balancingThread.join(5); + } } catch (InterruptedException exception) { Thread.currentThread().interrupt(); } @@ -383,6 +386,11 @@ public void saveConfiguration(ContainerBalancerConfiguration configuration, .build()); } + @VisibleForTesting + public ContainerBalancerConfiguration getConfig() { + return this.config; + } + private void validateConfiguration(ContainerBalancerConfiguration conf) throws InvalidContainerBalancerConfigurationException { // maxSizeEnteringTarget and maxSizeLeavingSource should by default be diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java index 7e2ba2fd0125..e275d345a5a7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java @@ -338,6 +338,10 @@ public Duration getMoveReplicationTimeout() { return Duration.ofMillis(moveReplicationTimeout); } + public void setMoveReplicationTimeout(Duration duration) { + this.moveReplicationTimeout = duration.toMillis(); + } + public void setMoveReplicationTimeout(long millis) { this.moveReplicationTimeout = millis; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java index d9102a883294..da1b8741cfd3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java @@ -54,6 +54,7 @@ public class ContainerBalancerSelectionCriteria { private ContainerManager containerManager; private Set selectedContainers; private Set excludeContainers; + private Set excludeContainersDueToFailure; private FindSourceStrategy findSourceStrategy; private Map> setMap; @@ -68,6 +69,7 @@ public ContainerBalancerSelectionCriteria( this.replicationManager = replicationManager; this.containerManager = containerManager; selectedContainers = new HashSet<>(); + excludeContainersDueToFailure = new HashSet<>(); excludeContainers = balancerConfiguration.getExcludeContainers(); this.findSourceStrategy = findSourceStrategy; this.setMap = new HashMap<>(); @@ -174,7 +176,8 @@ public boolean shouldBeExcluded(ContainerID containerID, "candidate container. 
Excluding it.", containerID); return true; } - return excludeContainers.contains(containerID) || selectedContainers.contains(containerID) || + return excludeContainers.contains(containerID) || excludeContainersDueToFailure.contains(containerID) || + selectedContainers.contains(containerID) || !isContainerClosed(container, node) || isECContainerAndLegacyRMEnabled(container) || isContainerReplicatingOrDeleting(containerID) || !findSourceStrategy.canSizeLeaveSource(node, container.getUsedBytes()) @@ -242,6 +245,10 @@ public void setSelectedContainers( this.selectedContainers = selectedContainers; } + public void addToExcludeDueToFailContainers(ContainerID container) { + this.excludeContainersDueToFailure.add(container); + } + private NavigableSet getCandidateContainers(DatanodeDetails node) { NavigableSet newSet = @@ -251,6 +258,9 @@ private NavigableSet getCandidateContainers(DatanodeDetails node) { if (excludeContainers != null) { idSet.removeAll(excludeContainers); } + if (excludeContainersDueToFailure != null) { + idSet.removeAll(excludeContainersDueToFailure); + } if (selectedContainers != null) { idSet.removeAll(selectedContainers); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 94e8cfd04a1a..0bfedd43960f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -75,7 +75,6 @@ public class ContainerBalancerTask implements Runnable { private OzoneConfiguration ozoneConfiguration; private ContainerBalancer containerBalancer; private final SCMContext scmContext; - private double threshold; private int totalNodesInCluster; private double maxDatanodesRatioToInvolvePerIteration; private long maxSizeToMovePerIteration; @@ -84,17 +83,13 @@ public class ContainerBalancerTask implements Runnable { // count actual size moved in bytes private long sizeActuallyMovedInLatestIteration; private int iterations; - private List unBalancedNodes; - private List overUtilizedNodes; - private List underUtilizedNodes; + private final List overUtilizedNodes; + private final List underUtilizedNodes; private List withinThresholdUtilizedNodes; private Set excludeNodes; private Set includeNodes; private ContainerBalancerConfiguration config; private ContainerBalancerMetrics metrics; - private long clusterCapacity; - private long clusterRemaining; - private double clusterAvgUtilisation; private PlacementPolicyValidateProxy placementPolicyValidateProxy; private NetworkTopology networkTopology; private double upperLimit; @@ -150,7 +145,6 @@ public ContainerBalancerTask(StorageContainerManager scm, this.overUtilizedNodes = new ArrayList<>(); this.underUtilizedNodes = new ArrayList<>(); this.withinThresholdUtilizedNodes = new ArrayList<>(); - this.unBalancedNodes = new ArrayList<>(); this.placementPolicyValidateProxy = scm.getPlacementPolicyValidateProxy(); this.networkTopology = scm.getClusterMap(); this.nextIterationIndex = nextIterationIndex; @@ -348,7 +342,6 @@ private boolean initializeIteration() { return false; } - this.threshold = config.getThresholdAsRatio(); this.maxDatanodesRatioToInvolvePerIteration = config.getMaxDatanodesRatioToInvolvePerIteration(); this.maxSizeToMovePerIteration = config.getMaxSizeToMovePerIteration(); @@ -368,22 
+361,19 @@ private boolean initializeIteration() { this.totalNodesInCluster = datanodeUsageInfos.size(); - clusterAvgUtilisation = calculateAvgUtilization(datanodeUsageInfos); + double clusterAvgUtilisation = calculateAvgUtilization(datanodeUsageInfos); if (LOG.isDebugEnabled()) { - LOG.debug("Average utilization of the cluster is {}", - clusterAvgUtilisation); + LOG.debug("Average utilization of the cluster is {}", clusterAvgUtilisation); } - // over utilized nodes have utilization(that is, used / capacity) greater - // than upper limit + double threshold = config.getThresholdAsRatio(); + // over utilized nodes have utilization(that is, used / capacity) greater than upper limit this.upperLimit = clusterAvgUtilisation + threshold; - // under utilized nodes have utilization(that is, used / capacity) less - // than lower limit + // under utilized nodes have utilization(that is, used / capacity) less than lower limit this.lowerLimit = clusterAvgUtilisation - threshold; if (LOG.isDebugEnabled()) { - LOG.debug("Lower limit for utilization is {} and Upper limit for " + - "utilization is {}", lowerLimit, upperLimit); + LOG.debug("Lower limit for utilization is {} and Upper limit for utilization is {}", lowerLimit, upperLimit); } long totalOverUtilizedBytes = 0L, totalUnderUtilizedBytes = 0L; @@ -433,12 +423,7 @@ private boolean initializeIteration() { OzoneConsts.GB); Collections.reverse(underUtilizedNodes); - unBalancedNodes = new ArrayList<>( - overUtilizedNodes.size() + underUtilizedNodes.size()); - unBalancedNodes.addAll(overUtilizedNodes); - unBalancedNodes.addAll(underUtilizedNodes); - - if (unBalancedNodes.isEmpty()) { + if (overUtilizedNodes.isEmpty() && underUtilizedNodes.isEmpty()) { LOG.info("Did not find any unbalanced Datanodes."); return false; } @@ -487,7 +472,7 @@ private IterationResult doIteration() { findTargetStrategy.reInitialize(potentialTargets, config, upperLimit); findSourceStrategy.reInitialize(getPotentialSources(), config, lowerLimit); - moveSelectionToFutureMap = new HashMap<>(unBalancedNodes.size()); + moveSelectionToFutureMap = new HashMap<>(underUtilizedNodes.size() + overUtilizedNodes.size()); boolean isMoveGeneratedInThisIteration = false; iterationResult = IterationResult.ITERATION_COMPLETED; boolean canAdaptWhenNearingLimits = true; @@ -553,6 +538,10 @@ private boolean processMoveSelection(DatanodeDetails source, containerID, containerToSourceMap.get(containerID), containerToTargetMap.get(containerID)); + // add source back to queue as a different container can be selected in next run. + findSourceStrategy.addBackSourceDataNode(source); + // exclude the container which caused failure of move to avoid error in next run. + selectionCriteria.addToExcludeDueToFailContainers(moveSelection.getContainerID()); return false; } @@ -563,6 +552,10 @@ private boolean processMoveSelection(DatanodeDetails source, } catch (ContainerNotFoundException e) { LOG.warn("Could not get container {} from Container Manager before " + "starting a container move", containerID, e); + // add source back to queue as a different container can be selected in next run. + findSourceStrategy.addBackSourceDataNode(source); + // exclude the container which caused failure of move to avoid error in next run. 
+ selectionCriteria.addToExcludeDueToFailContainers(moveSelection.getContainerID()); return false; } LOG.info("ContainerBalancer is trying to move container {} with size " + @@ -862,12 +855,22 @@ private boolean moveContainer(DatanodeDetails source, } catch (ContainerNotFoundException e) { LOG.warn("Could not find Container {} for container move", containerID, e); + // add source back to queue as a different container can be selected in next run. + findSourceStrategy.addBackSourceDataNode(source); + // exclude the container which caused failure of move to avoid error in next run. + selectionCriteria.addToExcludeDueToFailContainers(moveSelection.getContainerID()); + metrics.incrementNumContainerMovesFailedInLatestIteration(1); + return false; + } catch (NodeNotFoundException | TimeoutException e) { + LOG.warn("Container move failed for container {}", containerID, e); metrics.incrementNumContainerMovesFailedInLatestIteration(1); return false; - } catch (NodeNotFoundException | TimeoutException | - ContainerReplicaNotFoundException e) { + } catch (ContainerReplicaNotFoundException e) { LOG.warn("Container move failed for container {}", containerID, e); metrics.incrementNumContainerMovesFailedInLatestIteration(1); + // add source back to queue for replica not found only + // the container is not excluded as it is a replica related failure + findSourceStrategy.addBackSourceDataNode(source); return false; } @@ -881,6 +884,16 @@ private boolean moveContainer(DatanodeDetails source, } else { MoveManager.MoveResult result = future.join(); moveSelectionToFutureMap.put(moveSelection, future); + if (result == MoveManager.MoveResult.REPLICATION_FAIL_NOT_EXIST_IN_SOURCE || + result == MoveManager.MoveResult.REPLICATION_FAIL_EXIST_IN_TARGET || + result == MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED || + result == MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_DELETION || + result == MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_REPLICATION) { + // add source back to queue as a different container can be selected in next run. 
+ // the container which caused failure of move is not excluded + // as it is an intermittent failure or a replica related failure + findSourceStrategy.addBackSourceDataNode(source); + } return result == MoveManager.MoveResult.COMPLETED; } } else { @@ -937,8 +950,8 @@ private long ratioToBytes(Long nodeCapacity, double utilizationRatio) { * @return Average utilization value */ @VisibleForTesting - double calculateAvgUtilization(List nodes) { - if (nodes.size() == 0) { + public static double calculateAvgUtilization(List nodes) { + if (nodes.isEmpty()) { LOG.warn("No nodes to calculate average utilization for in " + "ContainerBalancer."); return 0; @@ -948,8 +961,8 @@ private long ratioToBytes(Long nodeCapacity, double utilizationRatio) { for (DatanodeUsageInfo node : nodes) { aggregatedStats.add(node.getScmNodeStat()); } - clusterCapacity = aggregatedStats.getCapacity().get(); - clusterRemaining = aggregatedStats.getRemaining().get(); + long clusterCapacity = aggregatedStats.getCapacity().get(); + long clusterRemaining = aggregatedStats.getRemaining().get(); return (clusterCapacity - clusterRemaining) / (double) clusterCapacity; } @@ -1032,11 +1045,8 @@ private void incSizeSelectedForMoving(DatanodeDetails source, */ private void resetState() { moveManager.resetState(); - this.clusterCapacity = 0L; - this.clusterRemaining = 0L; this.overUtilizedNodes.clear(); this.underUtilizedNodes.clear(); - this.unBalancedNodes.clear(); this.containerToSourceMap.clear(); this.containerToTargetMap.clear(); this.selectedSources.clear(); @@ -1062,15 +1072,14 @@ private boolean isBalancerRunning() { return taskStatus == Status.RUNNING; } - /** - * Gets the list of unBalanced nodes, that is, the over and under utilized - * nodes in the cluster. - * - * @return List of DatanodeUsageInfo containing unBalanced nodes. 
- */ @VisibleForTesting - List getUnBalancedNodes() { - return unBalancedNodes; + public List getOverUtilizedNodes() { + return overUtilizedNodes; + } + + @VisibleForTesting + public List getUnderUtilizedNodes() { + return underUtilizedNodes; } /** @@ -1098,6 +1107,11 @@ Set getSelectedTargets() { return selectedTargets; } + @VisibleForTesting + Set getSelectedSources() { + return selectedSources; + } + @VisibleForTesting int getCountDatanodesInvolvedPerIteration() { return countDatanodesInvolvedPerIteration; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java index 6350c3c76194..8306d8e1e1ff 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java @@ -107,7 +107,7 @@ public void increaseSizeLeaving(DatanodeDetails dui, long size) { if (currentSize != null) { sizeLeavingNode.put(dui, currentSize + size); //reorder according to the latest sizeLeavingNode - potentialSources.add(nodeManager.getUsageInfo(dui)); + addBackSourceDataNode(dui); return; } LOG.warn("Cannot find datanode {} in candidate source datanodes", @@ -138,6 +138,12 @@ public void removeCandidateSourceDataNode(DatanodeDetails dui) { potentialSources.removeIf(a -> a.getDatanodeDetails().equals(dui)); } + @Override + public void addBackSourceDataNode(DatanodeDetails dn) { + DatanodeUsageInfo dui = nodeManager.getUsageInfo(dn); + potentialSources.add(dui); + } + /** * Checks if specified size can leave a specified target datanode * according to {@link ContainerBalancerConfiguration} @@ -152,11 +158,16 @@ public boolean canSizeLeaveSource(DatanodeDetails source, long size) { if (sizeLeavingNode.containsKey(source)) { long sizeLeavingAfterMove = sizeLeavingNode.get(source) + size; //size can be moved out of source datanode only when the following - //two condition are met. - //1 sizeLeavingAfterMove does not succeed the configured + //three conditions are met. + //1 size should be greater than zero bytes + //2 sizeLeavingAfterMove does not succeed the configured // MaxSizeLeavingTarget - //2 after subtracting sizeLeavingAfterMove, the usage is bigger + //3 after subtracting sizeLeavingAfterMove, the usage is bigger // than or equal to lowerLimit + if (size <= 0) { + LOG.debug("{} bytes container cannot leave datanode {}", size, source.getUuidString()); + return false; + } if (sizeLeavingAfterMove > config.getMaxSizeLeavingSource()) { LOG.debug("{} bytes cannot leave datanode {} because 'size.leaving" + ".source.max' limit is {} and {} bytes have already left.", diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java index 236bdfd98d4c..f9eb24bd3cc6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java @@ -45,6 +45,16 @@ public interface FindSourceStrategy { */ void removeCandidateSourceDataNode(DatanodeDetails dui); + /** + * add the specified data node to the candidate source + * data nodes. 
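
// Illustrative sketch, not part of this patch: the bookkeeping pattern that ContainerBalancerTask
// above applies when a container move fails and that addBackSourceDataNode() supports — the source
// datanode is returned to the candidate queue so a different container can be tried, and (for
// non-replica failures) the failed container is excluded from later selection. All names below
// (BalancerRetryBook, nodeQueue, ...) are hypothetical stand-ins for the SCM classes.
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;

final class BalancerRetryBook {
  private final Deque<String> nodeQueue = new ArrayDeque<>();   // candidate source datanodes
  private final Set<Long> excludedContainers = new HashSet<>(); // containers whose move failed

  void addCandidateSource(String nodeId) {
    nodeQueue.addLast(nodeId);
  }

  String nextSource() {
    return nodeQueue.pollFirst();
  }

  // Called when a move fails: the node goes back into the queue so another container can be
  // picked for it; the container is blacklisted for this run unless the failure was replica-related.
  void onMoveFailed(String sourceNodeId, long containerId, boolean excludeContainer) {
    nodeQueue.addLast(sourceNodeId);
    if (excludeContainer) {
      excludedContainers.add(containerId);
    }
  }

  boolean isExcluded(long containerId) {
    return excludedContainers.contains(containerId);
  }

  public static void main(String[] args) {
    BalancerRetryBook book = new BalancerRetryBook();
    book.addCandidateSource("dn-1");
    String source = book.nextSource();
    book.onMoveFailed(source, 42L, true);      // container 42 caused the failure
    System.out.println(book.nextSource());     // dn-1 is available again
    System.out.println(book.isExcluded(42L));  // true: 42 will not be re-selected
  }
}
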
+ * This method does not check whether the specified Datanode is already present in the Collection. + * Callers must take the responsibility of checking and removing the Datanode before adding, if required. + * + * @param dn datanode to be added to potentialSources + */ + void addBackSourceDataNode(DatanodeDetails dn); + /** * increase the Leaving size of a candidate source data node. */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java index 07d38c05dabd..2f77891046d7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java @@ -19,6 +19,8 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; +import com.google.protobuf.ByteString; +import com.google.protobuf.Proto2Utils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -365,7 +367,7 @@ private int processMissingIndexes( final ReconstructECContainersCommand reconstructionCommand = new ReconstructECContainersCommand(container.getContainerID(), sourceDatanodesWithIndex, selectedDatanodes, - int2byte(missingIndexes), + integers2ByteString(missingIndexes), repConfig); // This can throw a CommandTargetOverloadedException, but there is no // point in retrying here. The sources we picked already have the @@ -623,13 +625,13 @@ private void adjustPendingOps(ECContainerReplicaCount replicaCount, Long.MAX_VALUE)); } - private static byte[] int2byte(List src) { + static ByteString integers2ByteString(List src) { byte[] dst = new byte[src.size()]; for (int i = 0; i < src.size(); i++) { dst[i] = src.get(i).byteValue(); } - return dst; + return Proto2Utils.unsafeByteString(dst); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index a3661243be69..b43caabd8d86 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.container.replication; import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -294,6 +295,7 @@ public synchronized void start() { if (!isRunning()) { LOG.info("Starting Replication Monitor Thread."); running = true; + metrics = ReplicationManagerMetrics.create(this); if (rmConf.isLegacyEnabled()) { legacyReplicationManager.setMetrics(metrics); } @@ -367,8 +369,10 @@ protected void startSubServices() { */ public synchronized void processAll() { if (!shouldRun()) { - LOG.info("Replication Manager is not ready to run until {}ms after " + - "safemode exit", waitTimeInMillis); + if (scmContext.isLeader()) { + LOG.info("Replication Manager is not ready to run until {}ms after " + + "safemode exit", 
waitTimeInMillis); + } return; } final long start = clock.millis(); @@ -703,10 +707,10 @@ private void adjustPendingOpsAndMetrics(ContainerInfo containerInfo, } else if (cmd.getType() == Type.reconstructECContainersCommand) { ReconstructECContainersCommand rcc = (ReconstructECContainersCommand) cmd; List targets = rcc.getTargetDatanodes(); - byte[] targetIndexes = rcc.getMissingContainerIndexes(); - for (int i = 0; i < targetIndexes.length; i++) { + final ByteString targetIndexes = rcc.getMissingContainerIndexes(); + for (int i = 0; i < targetIndexes.size(); i++) { containerReplicaPendingOps.scheduleAddReplica( - containerInfo.containerID(), targets.get(i), targetIndexes[i], + containerInfo.containerID(), targets.get(i), targetIndexes.byteAt(i), scmDeadlineEpochMs); } getMetrics().incrEcReconstructionCmdsSentTotal(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java index 5c3ee4e29aec..eb75db9bd504 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java @@ -235,10 +235,15 @@ public ReplicationManagerMetrics(ReplicationManager manager) { } public static ReplicationManagerMetrics create(ReplicationManager manager) { - return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME, - "SCM Replication manager (closed container replication) related " - + "metrics", - new ReplicationManagerMetrics(manager)); + ReplicationManagerMetrics replicationManagerMetrics = (ReplicationManagerMetrics) + DefaultMetricsSystem.instance().getSource(METRICS_SOURCE_NAME); + if (replicationManagerMetrics == null) { + return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME, + "SCM Replication manager (closed container replication) related " + + "metrics", + new ReplicationManagerMetrics(manager)); + } + return replicationManagerMetrics; } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java index 15163bf3e6a6..9d65eae06b15 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java @@ -17,12 +17,13 @@ package org.apache.hadoop.hdds.scm.ha; -import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.SCMRatisResponseProto; import org.apache.hadoop.hdds.scm.ha.io.CodecFactory; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; /** * Represents the response from RatisServer. 
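
// Illustrative sketch, not part of this patch: the "look up before register" idea behind the
// ReplicationManagerMetrics.create() change above, which consults DefaultMetricsSystem for an
// existing source before registering a new one. Shown here with a plain ConcurrentHashMap so the
// sketch is self-contained; the registry and method names are hypothetical.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class MetricsSourceRegistry {
  private final ConcurrentMap<String, Object> sources = new ConcurrentHashMap<>();

  // Returns the already-registered source when one exists under this name, otherwise keeps the
  // freshly created one. Repeated calls (e.g. ReplicationManager stop/start cycles) therefore
  // never trip over an "already registered" error.
  @SuppressWarnings("unchecked")
  <T> T registerIfAbsent(String name, T source) {
    return (T) sources.computeIfAbsent(name, k -> source);
  }

  public static void main(String[] args) {
    MetricsSourceRegistry registry = new MetricsSourceRegistry();
    String first = registry.registerIfAbsent("ReplicationManagerMetrics", "instance-1");
    String second = registry.registerIfAbsent("ReplicationManagerMetrics", "instance-2");
    System.out.println(first == second); // true: the first registration wins
  }
}
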
@@ -72,13 +73,11 @@ public static Message encode(final Object result) } final Class type = result.getClass(); - final ByteString value = CodecFactory.getCodec(type).serialize(result); - final SCMRatisResponseProto response = SCMRatisResponseProto.newBuilder() - .setType(type.getName()).setValue(value).build(); - return Message.valueOf( - org.apache.ratis.thirdparty.com.google.protobuf.ByteString.copyFrom( - response.toByteArray())); + .setType(type.getName()) + .setValue(CodecFactory.getCodec(type).serialize(result)) + .build(); + return Message.valueOf(UnsafeByteOperations.unsafeWrap(response.toByteString().asReadOnlyByteBuffer())); } public static SCMRatisResponse decode(RaftClientReply reply) @@ -87,14 +86,13 @@ public static SCMRatisResponse decode(RaftClientReply reply) return new SCMRatisResponse(reply.getException()); } - final byte[] response = reply.getMessage().getContent().toByteArray(); + final ByteString response = reply.getMessage().getContent(); - if (response.length == 0) { + if (response.isEmpty()) { return new SCMRatisResponse(); } - final SCMRatisResponseProto responseProto = SCMRatisResponseProto - .parseFrom(response); + final SCMRatisResponseProto responseProto = SCMRatisResponseProto.parseFrom(response.toByteArray()); try { final Class type = ReflectionUtil.getClass(responseProto.getType()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java index 5c97cff092ff..70dffba27ec0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java @@ -230,9 +230,7 @@ public SCMRatisResponse submitRequest(SCMRatisRequest request) final RaftClientReply raftClientReply = server.submitClientRequestAsync(raftClientRequest) .get(requestTimeout, TimeUnit.MILLISECONDS); - if (LOG.isDebugEnabled()) { - LOG.info("request {} Reply {}", raftClientRequest, raftClientReply); - } + LOG.debug("request {} Reply {}", raftClientRequest, raftClientReply); return SCMRatisResponse.decode(raftClientReply); } @@ -335,8 +333,9 @@ public boolean addSCM(AddSCMRequest request) throws IOException { } return raftClientReply.isSuccess(); } catch (IOException e) { - LOG.error("Failed to update Ratis configuration and add new peer. " + - "Cannot add new SCM: {}.", scm.getScmId(), e); + LOG.warn("Failed to update Ratis configuration and add new peer. " + + "Cannot add new SCM: {}. {}", scm.getScmId(), e.getMessage()); + LOG.debug("addSCM call failed due to: ", e); throw e; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index a5583b48b107..7eab815446ca 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -354,15 +354,6 @@ public void notifyTermIndexUpdated(long term, long index) { } if (currentLeaderTerm.get() == term) { - // On leader SCM once after it is ready, notify SCM services and also set - // leader ready in SCMContext. 
- if (scm.getScmHAManager().getRatisServer().getDivision().getInfo() - .isLeaderReady()) { - scm.getScmContext().setLeaderReady(); - scm.getSCMServiceManager().notifyStatusChanged(); - scm.getFinalizationManager().onLeaderReady(); - } - // Means all transactions before this term have been applied. // This means after a restart, all pending transactions have been applied. // Perform @@ -378,6 +369,18 @@ public void notifyTermIndexUpdated(long term, long index) { } } + @Override + public void notifyLeaderReady() { + if (!isInitialized) { + return; + } + // On leader SCM once after it is ready, notify SCM services and also set + // leader ready in SCMContext. + scm.getScmContext().setLeaderReady(); + scm.getSCMServiceManager().notifyStatusChanged(); + scm.getFinalizationManager().onLeaderReady(); + } + @Override public void notifyConfigurationChanged(long term, long index, RaftProtos.RaftConfigurationProto newRaftConfiguration) { @@ -440,7 +443,7 @@ public void close() throws IOException { transactionBuffer.close(); HadoopExecutors. shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS); - } else { + } else if (!scm.isStopped()) { scm.shutDown("scm statemachine is closed by ratis, terminate SCM"); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BigIntegerCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BigIntegerCodec.java index d2a4423ef2af..8d818c61d90b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BigIntegerCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BigIntegerCodec.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.ha.io; import com.google.protobuf.ByteString; +import com.google.protobuf.Proto2Utils; import java.math.BigInteger; @@ -28,7 +29,8 @@ public class BigIntegerCodec implements Codec { @Override public ByteString serialize(Object object) { - return ByteString.copyFrom(((BigInteger)object).toByteArray()); + // BigInteger returns a new byte[]. + return Proto2Utils.unsafeByteString(((BigInteger)object).toByteArray()); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BooleanCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BooleanCodec.java index 4cf0a7956784..f2abe1a0776c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BooleanCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BooleanCodec.java @@ -19,19 +19,20 @@ import com.google.protobuf.ByteString; -import static java.nio.charset.StandardCharsets.UTF_8; - /** * {@link Codec} for {@code Boolean} objects. */ public class BooleanCodec implements Codec { + static final ByteString TRUE = ByteString.copyFromUtf8(Boolean.TRUE.toString()); + static final ByteString FALSE = ByteString.copyFromUtf8(Boolean.FALSE.toString()); + @Override public ByteString serialize(Object object) { - return ByteString.copyFrom(((Boolean) object).toString().getBytes(UTF_8)); + return ((Boolean) object) ? TRUE : FALSE; } @Override - public Object deserialize(Class type, ByteString value) { - return Boolean.parseBoolean(new String(value.toByteArray(), UTF_8)); + public Boolean deserialize(Class type, ByteString value) { + return value.equals(TRUE) ? 
Boolean.TRUE : Boolean.FALSE; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/EnumCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/EnumCodec.java index 9c184ca31d63..272237d113ed 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/EnumCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/EnumCodec.java @@ -20,6 +20,7 @@ import com.google.common.primitives.Ints; import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Proto2Utils; import com.google.protobuf.ProtocolMessageEnum; import org.apache.hadoop.hdds.scm.ha.ReflectionUtil; @@ -33,7 +34,8 @@ public class EnumCodec implements Codec { @Override public ByteString serialize(Object object) throws InvalidProtocolBufferException { - return ByteString.copyFrom(Ints.toByteArray( + // toByteArray returns a new array + return Proto2Utils.unsafeByteString(Ints.toByteArray( ((ProtocolMessageEnum) object).getNumber())); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/IntegerCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/IntegerCodec.java index 8607cb7673a8..4e1217cf21e9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/IntegerCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/IntegerCodec.java @@ -21,6 +21,7 @@ import com.google.common.primitives.Ints; import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Proto2Utils; /** * Encodes/decodes an integer to a byte string. @@ -29,7 +30,8 @@ public class IntegerCodec implements Codec { @Override public ByteString serialize(Object object) throws InvalidProtocolBufferException { - return ByteString.copyFrom(Ints.toByteArray((Integer) object)); + // toByteArray returns a new array + return Proto2Utils.unsafeByteString(Ints.toByteArray((Integer) object)); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/LongCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/LongCodec.java index 512449afc7fa..5f2a5ed00734 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/LongCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/LongCodec.java @@ -20,6 +20,7 @@ import com.google.common.primitives.Longs; import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Proto2Utils; /** * {@link Codec} for {@code Long} objects. 
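
// Illustrative sketch, not part of this patch: why the codecs above stop calling
// ByteString.copyFrom(byte[]) for arrays they have just allocated. copyFrom() clones the array,
// while wrapping it avoids the second copy; that is safe only because the array is never touched
// again after wrapping. The patch uses an internal helper (Proto2Utils.unsafeByteString); this
// sketch uses protobuf's public UnsafeByteOperations and assumes protobuf-java 3.x on the classpath.
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import java.nio.ByteBuffer;

final class ByteStringWrapDemo {
  static ByteString serializeWithCopy(long value) {
    byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(value).array();
    return ByteString.copyFrom(bytes);          // copies the 8 bytes a second time
  }

  static ByteString serializeZeroCopy(long value) {
    byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(value).array();
    // bytes is freshly allocated and never reused afterwards, so wrapping it is safe.
    return UnsafeByteOperations.unsafeWrap(bytes);
  }

  public static void main(String[] args) {
    System.out.println(serializeWithCopy(42L).equals(serializeZeroCopy(42L))); // true
  }
}
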
@@ -29,7 +30,8 @@ public class LongCodec implements Codec { @Override public ByteString serialize(Object object) throws InvalidProtocolBufferException { - return ByteString.copyFrom(Longs.toByteArray((Long) object)); + // toByteArray returns a new array + return Proto2Utils.unsafeByteString(Longs.toByteArray((Long) object)); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ManagedSecretKeyCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ManagedSecretKeyCodec.java index 32705bb2a7ed..e2394c67b254 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ManagedSecretKeyCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ManagedSecretKeyCodec.java @@ -31,7 +31,7 @@ public class ManagedSecretKeyCodec implements Codec { public ByteString serialize(Object object) throws InvalidProtocolBufferException { ManagedSecretKey secretKey = (ManagedSecretKey) object; - return ByteString.copyFrom(secretKey.toProtobuf().toByteArray()); + return secretKey.toProtobuf().toByteString(); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/StringCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/StringCodec.java index b14884cfa69e..f42f15cb2221 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/StringCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/StringCodec.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.ha.io; import com.google.protobuf.ByteString; +import com.google.protobuf.Proto2Utils; import static java.nio.charset.StandardCharsets.UTF_8; @@ -27,7 +28,8 @@ public class StringCodec implements Codec { @Override public ByteString serialize(Object object) { - return ByteString.copyFrom(((String) object).getBytes(UTF_8)); + // getBytes returns a new array + return Proto2Utils.unsafeByteString(((String) object).getBytes(UTF_8)); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/X509CertificateCodec.java index 9a24baa85fcb..4261cfbdd7b0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/X509CertificateCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/X509CertificateCodec.java @@ -19,6 +19,7 @@ import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Proto2Utils; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import java.security.cert.X509Certificate; @@ -35,7 +36,8 @@ public ByteString serialize(Object object) try { String certString = CertificateCodec.getPEMEncodedString((X509Certificate) object); - return ByteString.copyFrom(certString.getBytes(UTF_8)); + // getBytes returns a new array + return Proto2Utils.unsafeByteString(certString.getBytes(UTF_8)); } catch (Exception ex) { throw new InvalidProtocolBufferException( "X509Certificate cannot be decoded: " + ex.getMessage()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java index fbfbb49c2521..7b10f60a5755 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java @@ -35,6 +35,6 @@ public interface DatanodeAdminMonitor extends Runnable { void stopMonitoring(DatanodeDetails dn); Set getTrackedNodes(); void setMetrics(NodeDecommissionMetrics metrics); - Map> getContainersReplicatedOnNode(DatanodeDetails dn) + Map> getContainersPendingReplication(DatanodeDetails dn) throws NodeNotFoundException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java index d7975ff1e58e..23bf41dc83e8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java @@ -494,7 +494,8 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) return underReplicated == 0 && unclosed == 0; } - public Map> getContainersReplicatedOnNode(DatanodeDetails dn) { + @Override + public Map> getContainersPendingReplication(DatanodeDetails dn) { Iterator iterator = trackedNodes.iterator(); while (iterator.hasNext()) { TrackedNode trackedNode = iterator.next(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index 7893e90812dc..ab296fc52bf8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -354,5 +354,4 @@ public int hashCode() { public boolean equals(Object obj) { return super.equals(obj); } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index 38e59b89e767..a593062bcdb8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -19,12 +19,17 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -38,10 +43,12 @@ import java.net.URISyntaxException; import java.net.UnknownHostException; import java.util.ArrayList; +import 
java.util.Collections; import java.util.Comparator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -56,8 +63,11 @@ public class NodeDecommissionManager { private final DatanodeAdminMonitor monitor; private final NodeManager nodeManager; + private ContainerManager containerManager; private final SCMContext scmContext; private final boolean useHostnames; + private Integer maintenanceReplicaMinimum; + private Integer maintenanceRemainingRedundancy; // Decommissioning and Maintenance mode progress related metrics. private final NodeDecommissionMetrics metrics; @@ -244,18 +254,14 @@ private DatanodeDetails findDnWithMostRecentHeartbeat( * @return True if port is used by the datanode. False otherwise. */ private boolean validateDNPortMatch(int port, DatanodeDetails dn) { - for (DatanodeDetails.Port p : dn.getPorts()) { - if (p.getValue() == port) { - return true; - } - } - return false; + return dn.hasPort(port); } - public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, + public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, ContainerManager cm, SCMContext scmContext, EventPublisher eventQueue, ReplicationManager rm) { this.nodeManager = nm; + this.containerManager = cm; this.scmContext = scmContext; executor = Executors.newScheduledThreadPool(1, @@ -285,6 +291,8 @@ public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, ScmConfigKeys.OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL_DEFAULT, TimeUnit.SECONDS); } + setMaintenanceConfigs(config.getInt("hdds.scm.replication.maintenance.replica.minimum", 2), + config.getInt("hdds.scm.replication.maintenance.remaining.redundancy", 1)); monitor = new DatanodeAdminMonitorImpl(config, eventQueue, nodeManager, rm); @@ -294,9 +302,9 @@ public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, TimeUnit.SECONDS); } - public Map> getContainersReplicatedOnNode(DatanodeDetails dn) + public Map> getContainersPendingReplication(DatanodeDetails dn) throws NodeNotFoundException { - return getMonitor().getContainersReplicatedOnNode(dn); + return getMonitor().getContainersPendingReplication(dn); } @VisibleForTesting @@ -305,9 +313,20 @@ public DatanodeAdminMonitor getMonitor() { } public synchronized List decommissionNodes( - List nodes) { + List nodes, boolean force) { List errors = new ArrayList<>(); List dns = mapHostnamesToDatanodes(nodes, errors); + // add check for fail-early if force flag is not set + if (!force) { + LOG.info("Force flag = {}. Checking if decommission is possible for dns: {}", force, dns); + boolean decommissionPossible = checkIfDecommissionPossible(dns, errors); + if (!decommissionPossible) { + LOG.error("Cannot decommission nodes as sufficient node are not available."); + return errors; + } + } else { + LOG.info("Force flag = {}. 
Skip checking if decommission is possible for dns: {}", force, dns); + } for (DatanodeDetails dn : dns) { try { startDecommission(dn); @@ -368,6 +387,62 @@ public synchronized void startDecommission(DatanodeDetails dn) } } + private synchronized boolean checkIfDecommissionPossible(List dns, List errors) { + int numDecom = dns.size(); + List validDns = new ArrayList<>(dns); + int inServiceTotal = nodeManager.getNodeCount(NodeStatus.inServiceHealthy()); + for (DatanodeDetails dn : dns) { + try { + NodeStatus nodeStatus = getNodeStatus(dn); + NodeOperationalState opState = nodeStatus.getOperationalState(); + if (opState != NodeOperationalState.IN_SERVICE) { + numDecom--; + validDns.remove(dn); + } + } catch (NodeNotFoundException ex) { + numDecom--; + validDns.remove(dn); + } + } + + for (DatanodeDetails dn : validDns) { + Set containers; + try { + containers = nodeManager.getContainers(dn); + } catch (NodeNotFoundException ex) { + LOG.warn("The host {} was not found in SCM. Ignoring the request to " + + "decommission it", dn.getHostName()); + continue; // ignore the DN and continue to next one + } + + for (ContainerID cid : containers) { + ContainerInfo cif; + try { + cif = containerManager.getContainer(cid); + } catch (ContainerNotFoundException ex) { + LOG.warn("Could not find container info for container {}.", cid); + continue; // ignore the container and continue to next one + } + synchronized (cif) { + if (cif.getState().equals(HddsProtos.LifeCycleState.DELETED) || + cif.getState().equals(HddsProtos.LifeCycleState.DELETING)) { + continue; + } + int reqNodes = cif.getReplicationConfig().getRequiredNodes(); + if ((inServiceTotal - numDecom) < reqNodes) { + String errorMsg = "Insufficient nodes. Tried to decommission " + dns.size() + + " nodes of which " + numDecom + " nodes were valid. Cluster has " + inServiceTotal + + " IN-SERVICE nodes, " + reqNodes + " of which are required for minimum replication. "; + LOG.info(errorMsg + "Failing due to datanode : {}, container : {}", dn, cid); + errors.add(new DatanodeAdminError("AllHosts", errorMsg)); + return false; + } + } + } + } + return true; + } + public synchronized List recommissionNodes( List nodes) { List errors = new ArrayList<>(); @@ -406,9 +481,20 @@ public synchronized void recommission(DatanodeDetails dn) } public synchronized List startMaintenanceNodes( - List nodes, int endInHours) { + List nodes, int endInHours, boolean force) { List errors = new ArrayList<>(); List dns = mapHostnamesToDatanodes(nodes, errors); + // add check for fail-early if force flag is not set + if (!force) { + LOG.info("Force flag = {}. Checking if maintenance is possible for dns: {}", force, dns); + boolean maintenancePossible = checkIfMaintenancePossible(dns, errors); + if (!maintenancePossible) { + LOG.error("Cannot put nodes to maintenance as sufficient node are not available."); + return errors; + } + } else { + LOG.info("Force flag = {}. 
Skip checking if maintenance is possible for dns: {}", force, dns); + } for (DatanodeDetails dn : dns) { try { startMaintenance(dn, endInHours); @@ -454,6 +540,73 @@ public synchronized void startMaintenance(DatanodeDetails dn, int endInHours) } } + private synchronized boolean checkIfMaintenancePossible(List dns, List errors) { + int numMaintenance = dns.size(); + List validDns = dns.stream().collect(Collectors.toList()); + Collections.copy(validDns, dns); + int inServiceTotal = nodeManager.getNodeCount(NodeStatus.inServiceHealthy()); + for (DatanodeDetails dn : dns) { + try { + NodeStatus nodeStatus = getNodeStatus(dn); + NodeOperationalState opState = nodeStatus.getOperationalState(); + if (opState != NodeOperationalState.IN_SERVICE) { + numMaintenance--; + validDns.remove(dn); + } + } catch (NodeNotFoundException ex) { + numMaintenance--; + validDns.remove(dn); + } + } + + for (DatanodeDetails dn : validDns) { + Set containers; + try { + containers = nodeManager.getContainers(dn); + } catch (NodeNotFoundException ex) { + LOG.warn("The host {} was not found in SCM. Ignoring the request to " + + "enter maintenance", dn.getHostName()); + errors.add(new DatanodeAdminError(dn.getHostName(), + "The host was not found in SCM")); + continue; // ignore the DN and continue to next one + } + + for (ContainerID cid : containers) { + ContainerInfo cif; + try { + cif = containerManager.getContainer(cid); + } catch (ContainerNotFoundException ex) { + continue; // ignore the container and continue to next one + } + synchronized (cif) { + if (cif.getState().equals(HddsProtos.LifeCycleState.DELETED) || + cif.getState().equals(HddsProtos.LifeCycleState.DELETING)) { + continue; + } + + int minInService; + HddsProtos.ReplicationType replicationType = cif.getReplicationType(); + if (replicationType.equals(HddsProtos.ReplicationType.EC)) { + int reqNodes = cif.getReplicationConfig().getRequiredNodes(); + int data = ((ECReplicationConfig)cif.getReplicationConfig()).getData(); + minInService = Math.min((data + maintenanceRemainingRedundancy), reqNodes); + } else { + minInService = maintenanceReplicaMinimum; + } + if ((inServiceTotal - numMaintenance) < minInService) { + String errorMsg = "Insufficient nodes. Tried to start maintenance for " + dns.size() + + " nodes of which " + numMaintenance + " nodes were valid. Cluster has " + inServiceTotal + + " IN-SERVICE nodes, " + minInService + " of which are required for minimum replication. "; + LOG.info(errorMsg + "Failing due to datanode : {}, container : {}", dn, cid); + errors.add(new DatanodeAdminError("AllHosts", errorMsg)); + return false; + } + } + } + } + return true; + } + /** * Stops the decommission monitor from running when SCM is shutdown. 
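
// Illustrative sketch, not part of this patch: the fail-early arithmetic behind
// checkIfDecommissionPossible()/checkIfMaintenancePossible() above. Before taking nodes out of
// service, verify that every container would still have at least the number of IN_SERVICE nodes
// its replication config requires; otherwise refuse (unless the force flag is set). Only the
// arithmetic is taken from the patch; the types below are simple stand-ins for the SCM classes.
import java.util.List;

final class DecommissionPreCheck {
  // requiredNodesPerContainer: e.g. 3 for RATIS/THREE, data + parity for EC.
  static boolean canDecommission(int inServiceNodes, int nodesToRemove,
      List<Integer> requiredNodesPerContainer) {
    int remaining = inServiceNodes - nodesToRemove;
    for (int required : requiredNodesPerContainer) {
      if (remaining < required) {
        return false; // some container could no longer be fully replicated
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // 5 healthy IN_SERVICE nodes, decommissioning 2, containers need 3 replicas: allowed.
    System.out.println(canDecommission(5, 2, List.of(3, 3)));   // true
    // Decommissioning 3 would leave only 2 nodes for a 3-replica container: refused.
    System.out.println(canDecommission(5, 3, List.of(3)));      // false
  }
}
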
*/ @@ -485,4 +638,12 @@ public void onBecomeLeader() { } }); } + + @VisibleForTesting + public void setMaintenanceConfigs(int replicaMinimum, int remainingRedundancy) { + synchronized (this) { + maintenanceRemainingRedundancy = remainingRedundancy; + maintenanceReplicaMinimum = replicaMinimum; + } + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 21bcd1f78a2b..25be60945a91 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -42,6 +42,7 @@ import jakarta.annotation.Nullable; import java.io.Closeable; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Set; @@ -428,4 +429,14 @@ default HDDSLayoutVersionManager getLayoutVersionManager() { } default void forceNodesToHealthyReadOnly() { } + + /** + * This API allows removal of only DECOMMISSIONED, IN_MAINTENANCE and DEAD nodes + * from NodeManager data structures and cleanup memory. + * @param datanodeDetails + * @throws NodeNotFoundException + */ + default void removeNode(DatanodeDetails datanodeDetails) throws NodeNotFoundException, IOException { + + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index 2f5f93c1160a..4543a49b41c5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -738,7 +738,7 @@ public void run() { */ public synchronized void forceNodesToHealthyReadOnly() { try { - List nodes = nodeStateMap.filterNodes(null, HEALTHY); + List nodes = nodeStateMap.getDatanodeInfos(null, HEALTHY); for (DatanodeInfo node : nodes) { nodeStateMap.updateNodeHealthState(node.getUuid(), HEALTHY_READONLY); @@ -1033,4 +1033,8 @@ ScheduledFuture unpause() { return healthCheckFuture; } + + protected void removeNode(DatanodeDetails datanodeDetails) { + nodeStateMap.removeNode(datanodeDetails); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java index 7b1d6dd27d3a..3aff2f456e4f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java @@ -261,5 +261,4 @@ public int compareTo(NodeStatus o) { } return order; } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index a149998db8b9..3790214f5268 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -691,6 +691,11 @@ public void processLayoutVersionReport(DatanodeDetails datanodeDetails, return; } + sendFinalizeToDatanodeIfNeeded(datanodeDetails, layoutVersionReport); + } + + protected void sendFinalizeToDatanodeIfNeeded(DatanodeDetails datanodeDetails, + LayoutVersionProto layoutVersionReport) { // Software layout version is hardcoded 
to the SCM. int scmSlv = scmLayoutVersionManager.getSoftwareLayoutVersion(); int dnSlv = layoutVersionReport.getSoftwareLayoutVersion(); @@ -1642,4 +1647,36 @@ private ReentrantReadWriteLock.WriteLock writeLock() { private ReentrantReadWriteLock.ReadLock readLock() { return lock.readLock(); } + + /** + * This API allows removal of only DECOMMISSIONED and DEAD nodes from NodeManager data structures and cleanup memory. + * This API call is having a pre-condition before removal of node like following resources to be removed: + * --- all pipelines for datanode should be closed. + * --- all containers for datanode should be closed. + * --- remove all containers replicas maintained by datanode. + * --- clears all SCM DeletedBlockLog transaction records associated with datanode. + * + * @param datanodeDetails + * @throws NodeNotFoundException + */ + @Override + public void removeNode(DatanodeDetails datanodeDetails) throws NodeNotFoundException, IOException { + writeLock().lock(); + try { + NodeStatus nodeStatus = this.getNodeStatus(datanodeDetails); + if (datanodeDetails.isDecommissioned() || nodeStatus.isDead()) { + if (clusterMap.contains(datanodeDetails)) { + clusterMap.remove(datanodeDetails); + } + nodeStateManager.removeNode(datanodeDetails); + removeFromDnsToUuidMap(datanodeDetails.getUuid(), datanodeDetails.getIpAddress()); + final List cmdList = getCommandQueue(datanodeDetails.getUuid()); + LOG.info("Clearing command queue of size {} for DN {}", cmdList.size(), datanodeDetails); + } else { + LOG.warn("Node not decommissioned or dead, cannot remove: {}", datanodeDetails); + } + } finally { + writeLock().unlock(); + } + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java deleted file mode 100644 index c0f46f15fe20..000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .NO_SUCH_DATANODE; - -/** - * This data structure maintains the list of containers that is on a datanode. - * This information is built from the DN container reports. 
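
// Illustrative sketch, not part of this patch: with Node2ContainerMap deleted here and
// Node2PipelineMap rewritten further below to stand on its own, the underlying pattern is a
// per-node ConcurrentHashMap of sets, mutated with computeIfAbsent/computeIfPresent and read
// through defensive copies. Class and method names below are hypothetical.
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

final class NodeToPipelines {
  private final Map<UUID, Set<String>> byNode = new ConcurrentHashMap<>();

  void add(UUID node, String pipelineId) {
    byNode.computeIfAbsent(node, k -> ConcurrentHashMap.newKeySet()).add(pipelineId);
  }

  void remove(UUID node, String pipelineId) {
    byNode.computeIfPresent(node, (k, v) -> {
      v.remove(pipelineId);
      return v;
    });
  }

  // Copy on read so callers can iterate without sharing internal state.
  Set<String> get(UUID node) {
    Set<String> s = byNode.get(node);
    return s != null ? new HashSet<>(s) : Collections.emptySet();
  }

  public static void main(String[] args) {
    NodeToPipelines map = new NodeToPipelines();
    UUID dn = UUID.randomUUID();
    map.add(dn, "pipeline-1");
    System.out.println(map.get(dn)); // [pipeline-1]
    map.remove(dn, "pipeline-1");
    System.out.println(map.get(dn)); // []
  }
}
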
- */ -public class Node2ContainerMap extends Node2ObjectsMap { - - /** - * Constructs a Node2ContainerMap Object. - */ - public Node2ContainerMap() { - super(); - } - - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - public Set getContainers(UUID datanode) { - return getObjects(datanode); - } - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param containerIDs - List of ContainerIDs. - */ - @Override - public void insertNewDatanode(UUID datanodeID, Set containerIDs) - throws SCMException { - super.insertNewDatanode(datanodeID, containerIDs); - } - - /** - * Updates the Container list of an existing DN. - * - * @param datanodeID - UUID of DN. - * @param containers - Set of Containers tht is present on DN. - * @throws SCMException - if we don't know about this datanode, for new DN - * use addDatanodeInContainerMap. - */ - public void setContainersForDatanode(UUID datanodeID, - Set containers) throws SCMException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(containers); - if (dn2ObjectMap - .computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) - == null) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - } - - @VisibleForTesting - @Override - public int size() { - return dn2ObjectMap.size(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java index 6533cb807642..35107829f883 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java @@ -18,10 +18,14 @@ package org.apache.hadoop.hdds.scm.node.states; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -34,11 +38,13 @@ *

TODO: this information needs to be regenerated from pipeline reports * on SCM restart */ -public class Node2PipelineMap extends Node2ObjectsMap { +public class Node2PipelineMap { + private final Map> dn2PipelineMap = new ConcurrentHashMap<>(); - /** Constructs a Node2PipelineMap Object. */ + /** + * Constructs a Node2PipelineMap Object. + */ public Node2PipelineMap() { - super(); } /** @@ -47,17 +53,19 @@ public Node2PipelineMap() { * @param datanode - UUID * @return Set of pipelines or Null. */ - public Set getPipelines(UUID datanode) { - return getObjects(datanode); + public Set getPipelines(@Nonnull UUID datanode) { + final Set s = dn2PipelineMap.get(datanode); + return s != null ? new HashSet<>(s) : Collections.emptySet(); } /** * Return 0 if there are no pipelines associated with this datanode ID. + * * @param datanode - UUID * @return Number of pipelines or 0. */ public int getPipelinesCount(UUID datanode) { - return getObjects(datanode).size(); + return getPipelines(datanode).size(); } /** @@ -65,18 +73,18 @@ public int getPipelinesCount(UUID datanode) { * * @param pipeline Pipeline to be added */ - public synchronized void addPipeline(Pipeline pipeline) { + public void addPipeline(Pipeline pipeline) { for (DatanodeDetails details : pipeline.getNodes()) { UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet()) + dn2PipelineMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet()) .add(pipeline.getId()); } } - public synchronized void removePipeline(Pipeline pipeline) { + public void removePipeline(Pipeline pipeline) { for (DatanodeDetails details : pipeline.getNodes()) { UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfPresent(dnId, + dn2PipelineMap.computeIfPresent(dnId, (k, v) -> { v.remove(pipeline.getId()); return v; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java index 2a71eede6bf0..e8843fa21e6d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java @@ -19,16 +19,18 @@ package org.apache.hadoop.hdds.scm.node.states; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Predicate; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -43,27 +45,26 @@ * NodeStateManager to maintain the state. If anyone wants to change the * state of a node they should call NodeStateManager, do not directly use * this class. + *

+ * Concurrency consideration: + * - thread-safe */ public class NodeStateMap { /** * Node id to node info map. */ - private final ConcurrentHashMap nodeMap; + private final Map nodeMap = new HashMap<>(); /** * Node to set of containers on the node. */ - private final ConcurrentHashMap> nodeToContainer; + private final Map> nodeToContainer = new HashMap<>(); - private final ReadWriteLock lock; + private final ReadWriteLock lock = new ReentrantReadWriteLock(); /** * Creates a new instance of NodeStateMap with no nodes. */ - public NodeStateMap() { - lock = new ReentrantReadWriteLock(); - nodeMap = new ConcurrentHashMap<>(); - nodeToContainer = new ConcurrentHashMap<>(); - } + public NodeStateMap() { } /** * Adds a node to NodeStateMap. @@ -92,6 +93,23 @@ public void addNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus, } } + /** + * Removes a node from NodeStateMap. + * + * @param datanodeDetails DatanodeDetails + * + */ + public void removeNode(DatanodeDetails datanodeDetails) { + lock.writeLock().lock(); + try { + UUID uuid = datanodeDetails.getUuid(); + nodeMap.remove(uuid); + nodeToContainer.remove(uuid); + } finally { + lock.writeLock().unlock(); + } + } + /** * Update a node in NodeStateMap. * @@ -129,7 +147,7 @@ public NodeStatus updateNodeHealthState(UUID nodeId, NodeState newHealth) throws NodeNotFoundException { try { lock.writeLock().lock(); - DatanodeInfo dn = getNodeInfo(nodeId); + DatanodeInfo dn = getNodeInfoUnsafe(nodeId); NodeStatus oldStatus = dn.getNodeStatus(); NodeStatus newStatus = new NodeStatus( oldStatus.getOperationalState(), newHealth); @@ -153,7 +171,7 @@ public NodeStatus updateNodeOperationalState(UUID nodeId, throws NodeNotFoundException { try { lock.writeLock().lock(); - DatanodeInfo dn = getNodeInfo(nodeId); + DatanodeInfo dn = getNodeInfoUnsafe(nodeId); NodeStatus oldStatus = dn.getNodeStatus(); NodeStatus newStatus = new NodeStatus( newOpState, oldStatus.getHealth(), opStateExpiryEpochSeconds); @@ -176,8 +194,7 @@ public NodeStatus updateNodeOperationalState(UUID nodeId, public DatanodeInfo getNodeInfo(UUID uuid) throws NodeNotFoundException { lock.readLock().lock(); try { - checkIfNodeExist(uuid); - return nodeMap.get(uuid); + return getNodeInfoUnsafe(uuid); } finally { lock.readLock().unlock(); } @@ -413,7 +430,7 @@ private void checkIfNodeExist(UUID uuid) throws NodeNotFoundException { * @param health * @return List of DatanodeInfo objects matching the passed state */ - public List filterNodes( + private List filterNodes( NodeOperationalState opState, NodeState health) { if (opState != null && health != null) { return filterNodes(matching(new NodeStatus(opState, health))); @@ -445,6 +462,11 @@ private List filterNodes(Predicate filter) { return result; } + private @Nonnull DatanodeInfo getNodeInfoUnsafe(@Nonnull UUID uuid) throws NodeNotFoundException { + checkIfNodeExist(uuid); + return nodeMap.get(uuid); + } + private static Predicate matching(NodeStatus status) { return dn -> status.equals(dn.getNodeStatus()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 8336bce5eae7..163f42351032 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -20,7 +20,6 @@ import java.io.IOException; import 
java.util.Collections; -import java.util.Comparator; import java.util.List; import java.util.Set; import java.util.stream.Collectors; @@ -244,7 +243,6 @@ private List filterPipelineEngagement() { getPipelineStateManager(), d))) .filter(d -> (d.getPipelines() >= getNodeManager().pipelineLimit(d.getDn()))) - .sorted(Comparator.comparingInt(DnWithPipelines::getPipelines)) .map(d -> d.getDn()) .collect(Collectors.toList()); return excluded; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index 0914cdd90b22..e77e2aebb31f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteKeyBlocksResultProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesRequestProto; @@ -43,6 +44,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.RatisUtil; +import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -159,6 +161,10 @@ private SCMBlockLocationResponse processMessage( request.getSortDatanodesRequest(), request.getVersion() )); break; + case GetClusterTree: + response.setGetClusterTreeResponse( + getClusterTree(request.getVersion())); + break; default: // Should never happen throw new IOException("Unknown Operation " + request.getCmdType() + @@ -276,4 +282,13 @@ public SortDatanodesResponseProto sortDatanodes( throw new ServiceException(ex); } } + + public GetClusterTreeResponseProto getClusterTree(int clientVersion) + throws IOException { + GetClusterTreeResponseProto.Builder resp = + GetClusterTreeResponseProto.newBuilder(); + InnerNode clusterTree = impl.getNetworkTopology(); + resp.setClusterTree(clusterTree.toProtobuf(clientVersion).getInnerNode()); + return resp.build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index f402b9309fe4..3d7cff358fe4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -72,6 +72,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; @@ -714,6 +716,12 @@ public ScmContainerLocationResponse processRequest( .setDecommissionScmResponse(decommissionScm( request.getDecommissionScmRequest())) .build(); + case GetMetrics: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setGetMetricsResponse(getMetrics(request.getGetMetricsRequest())) + .build(); default: throw new IllegalArgumentException( "Unknown command type: " + request.getCmdType()); @@ -1099,6 +1107,12 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxSizeToMovePerIterationInGB = Optional.empty(); Optional maxSizeEnteringTargetInGB = Optional.empty(); Optional maxSizeLeavingSourceInGB = Optional.empty(); + Optional balancingInterval = Optional.empty(); + Optional moveTimeout = Optional.empty(); + Optional moveReplicationTimeout = Optional.empty(); + Optional networkTopologyEnable = Optional.empty(); + Optional includeNodes = Optional.empty(); + Optional excludeNodes = Optional.empty(); if (request.hasThreshold()) { threshold = Optional.of(request.getThreshold()); @@ -1124,19 +1138,47 @@ public StartContainerBalancerResponseProto startContainerBalancer( maxSizeToMovePerIterationInGB = Optional.of(request.getMaxSizeToMovePerIterationInGB()); } + if (request.hasMaxSizeEnteringTargetInGB()) { maxSizeEnteringTargetInGB = Optional.of(request.getMaxSizeEnteringTargetInGB()); } + if (request.hasMaxSizeLeavingSourceInGB()) { maxSizeLeavingSourceInGB = Optional.of(request.getMaxSizeLeavingSourceInGB()); } + if (request.hasBalancingInterval()) { + balancingInterval = Optional.of(request.getBalancingInterval()); + } + + if (request.hasMoveTimeout()) { + moveTimeout = Optional.of(request.getMoveTimeout()); + } + + if (request.hasMoveReplicationTimeout()) { + moveReplicationTimeout = Optional.of(request.getMoveReplicationTimeout()); + } + + if (request.hasNetworkTopologyEnable()) { + networkTopologyEnable = Optional.of(request.getNetworkTopologyEnable()); + } + + if (request.hasIncludeNodes()) { + includeNodes = Optional.of(request.getIncludeNodes()); + } + + if (request.hasExcludeNodes()) { + excludeNodes = Optional.of(request.getExcludeNodes()); + } + return impl.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, 
includeNodes, + excludeNodes); } public StopContainerBalancerResponseProto stopContainerBalancer( @@ -1157,7 +1199,7 @@ public ContainerBalancerStatusResponseProto getContainerBalancerStatus( public DecommissionNodesResponseProto decommissionNodes( DecommissionNodesRequestProto request) throws IOException { List errors = - impl.decommissionNodes(request.getHostsList()); + impl.decommissionNodes(request.getHostsList(), request.getForce()); DecommissionNodesResponseProto.Builder response = DecommissionNodesResponseProto.newBuilder(); for (DatanodeAdminError e : errors) { @@ -1206,7 +1248,7 @@ public StartMaintenanceNodesResponseProto startMaintenanceNodes( StartMaintenanceNodesRequestProto request) throws IOException { List errors = impl.startMaintenanceNodes(request.getHostsList(), - (int)request.getEndInHours()); + (int)request.getEndInHours(), request.getForce()); StartMaintenanceNodesResponseProto.Builder response = StartMaintenanceNodesResponseProto.newBuilder(); for (DatanodeAdminError e : errors) { @@ -1287,4 +1329,8 @@ public DecommissionScmResponseProto decommissionScm( return impl.decommissionScm( request.getScmId()); } + + public GetMetricsResponseProto getMetrics(GetMetricsRequestProto request) throws IOException { + return GetMetricsResponseProto.newBuilder().setMetricsJson(impl.getMetrics(request.getQuery())).build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java index f882a627c5da..a5ecdb23425b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java @@ -88,6 +88,7 @@ public class SCMSafeModeManager implements SafeModeManager { private final boolean isSafeModeEnabled; private AtomicBoolean inSafeMode = new AtomicBoolean(true); private AtomicBoolean preCheckComplete = new AtomicBoolean(false); + private AtomicBoolean forceExitSafeMode = new AtomicBoolean(false); private Map exitRules = new HashMap(1); private Set preCheckRules = new HashSet<>(1); @@ -151,7 +152,7 @@ public SCMSafeModeManager(ConfigurationSource conf, } } else { this.safeModeMetrics = null; - exitSafeMode(eventQueue); + exitSafeMode(eventQueue, true); } } @@ -173,6 +174,8 @@ public void emitSafeModeStatus() { SafeModeStatus safeModeStatus = new SafeModeStatus(getInSafeMode(), getPreCheckComplete()); + safeModeStatus.setForceExitSafeMode(isForceExitSafeMode()); + // update SCMContext scmContext.updateSafeModeStatus(safeModeStatus); @@ -213,7 +216,7 @@ public synchronized void validateSafeModeExitRules(String ruleName, if (validatedRules.size() == exitRules.size()) { // All rules are satisfied, we can exit safe mode. LOG.info("ScmSafeModeManager, all rules are successfully validated"); - exitSafeMode(eventQueue); + exitSafeMode(eventQueue, false); } } @@ -238,14 +241,16 @@ public void completePreCheck(EventPublisher eventQueue) { * 3. Cleanup resources. * 4. Emit safe mode status. * @param eventQueue + * @param force */ @VisibleForTesting - public void exitSafeMode(EventPublisher eventQueue) { + public void exitSafeMode(EventPublisher eventQueue, boolean force) { LOG.info("SCM exiting safe mode."); // If safemode is exiting, then pre check must also have passed so // set it to true. 
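For context on the force flag threaded through exitSafeMode above: the diff records whether the exit was forced and copies that flag onto the status object it publishes. A minimal, self-contained sketch of that bookkeeping (illustrative only, not the real SCMSafeModeManager):

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical illustration of the force-exit bookkeeping; class and method
// names are stand-ins, not Ozone classes.
class SafeModeTracker {
  private final AtomicBoolean inSafeMode = new AtomicBoolean(true);
  private final AtomicBoolean forceExit = new AtomicBoolean(false);

  /** Exit safe mode, remembering whether the exit was forced. */
  void exitSafeMode(boolean force) {
    inSafeMode.set(false);
    forceExit.set(force);
  }

  /** Snapshot published to listeners, mirroring the SafeModeStatus idea in the diff. */
  Status status() {
    return new Status(inSafeMode.get(), forceExit.get());
  }

  static final class Status {
    private final boolean inSafeMode;
    private final boolean forcedExit;

    Status(boolean inSafeMode, boolean forcedExit) {
      this.inSafeMode = inSafeMode;
      this.forcedExit = forcedExit;
    }

    boolean isInSafeMode() { return inSafeMode; }
    boolean isForcedExit() { return forcedExit; }
  }
}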
setPreCheckComplete(true); setInSafeMode(false); + setForceExitSafeMode(force); // TODO: Remove handler registration as there is no need to listen to // register events anymore. @@ -289,7 +294,6 @@ public boolean getInSafeMode() { } return inSafeMode.get(); } - /** * Get the safe mode status of all rules. * @@ -319,6 +323,14 @@ public void setPreCheckComplete(boolean newState) { this.preCheckComplete.set(newState); } + public boolean isForceExitSafeMode() { + return forceExitSafeMode.get(); + } + + public void setForceExitSafeMode(boolean forceExitSafeMode) { + this.forceExitSafeMode.set(forceExitSafeMode); + } + public static Logger getLogger() { return LOG; } @@ -350,6 +362,8 @@ public static class SafeModeStatus { private final boolean safeModeStatus; private final boolean preCheckPassed; + private boolean forceExitSafeMode; + public SafeModeStatus(boolean safeModeState, boolean preCheckPassed) { this.safeModeStatus = safeModeState; this.preCheckPassed = preCheckPassed; @@ -363,6 +377,14 @@ public boolean isPreCheckComplete() { return preCheckPassed; } + public void setForceExitSafeMode(boolean forceExitSafeMode) { + this.forceExitSafeMode = forceExitSafeMode; + } + + public boolean isForceExitSafeMode() { + return forceExitSafeMode; + } + @Override public String toString() { return "SafeModeStatus{" + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java index 80b8257c40b2..02bc10ba6e40 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java @@ -22,6 +22,7 @@ import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; /** * This class is used for maintaining SafeMode metric information, which can @@ -33,16 +34,16 @@ public class SafeModeMetrics { // These all values will be set to some values when safemode is enabled. - private @Metric MutableCounterLong + private @Metric MutableGaugeLong numContainerWithOneReplicaReportedThreshold; private @Metric MutableCounterLong currentContainersWithOneReplicaReportedCount; // When hdds.scm.safemode.pipeline-availability.check is set then only // below metrics will have some values, otherwise they will be zero. 
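The threshold metrics below change from MutableCounterLong to MutableGaugeLong because a threshold is an absolute value that should be overwritten with set(), whereas a counter only supports incr(). A rough sketch of the distinction, assuming the stock Hadoop metrics2 annotations and registration flow; the class and metric names are illustrative:

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;

// Illustrative metrics source: thresholds are gauges (settable),
// progress counts are counters (monotonically increasing).
@Metrics(about = "Example safe mode metrics", context = "ozone")
class ExampleSafeModeMetrics {
  private @Metric MutableGaugeLong healthyPipelinesThreshold;
  private @Metric MutableCounterLong currentHealthyPipelines;

  // The @Metric fields are injected when the source is registered.
  static ExampleSafeModeMetrics create() {
    return DefaultMetricsSystem.instance().register(
        "ExampleSafeModeMetrics", "Example safe mode metrics",
        new ExampleSafeModeMetrics());
  }

  void setThreshold(long value) {
    // set() overwrites the value; incr(value) on a counter would only add to it.
    healthyPipelinesThreshold.set(value);
  }

  void onHealthyPipelineReported() {
    currentHealthyPipelines.incr();
  }
}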
- private @Metric MutableCounterLong numHealthyPipelinesThreshold; + private @Metric MutableGaugeLong numHealthyPipelinesThreshold; private @Metric MutableCounterLong currentHealthyPipelinesCount; - private @Metric MutableCounterLong + private @Metric MutableGaugeLong numPipelinesWithAtleastOneReplicaReportedThreshold; private @Metric MutableCounterLong currentPipelinesWithAtleastOneReplicaReportedCount; @@ -55,7 +56,7 @@ public static SafeModeMetrics create() { } public void setNumHealthyPipelinesThreshold(long val) { - this.numHealthyPipelinesThreshold.incr(val); + this.numHealthyPipelinesThreshold.set(val); } public void incCurrentHealthyPipelinesCount() { @@ -63,7 +64,7 @@ public void incCurrentHealthyPipelinesCount() { } public void setNumPipelinesWithAtleastOneReplicaReportedThreshold(long val) { - this.numPipelinesWithAtleastOneReplicaReportedThreshold.incr(val); + this.numPipelinesWithAtleastOneReplicaReportedThreshold.set(val); } public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() { @@ -71,35 +72,35 @@ public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() { } public void setNumContainerWithOneReplicaReportedThreshold(long val) { - this.numContainerWithOneReplicaReportedThreshold.incr(val); + this.numContainerWithOneReplicaReportedThreshold.set(val); } public void incCurrentContainersWithOneReplicaReportedCount() { this.currentContainersWithOneReplicaReportedCount.incr(); } - public MutableCounterLong getNumHealthyPipelinesThreshold() { + MutableGaugeLong getNumHealthyPipelinesThreshold() { return numHealthyPipelinesThreshold; } - public MutableCounterLong getCurrentHealthyPipelinesCount() { + MutableCounterLong getCurrentHealthyPipelinesCount() { return currentHealthyPipelinesCount; } - public MutableCounterLong + MutableGaugeLong getNumPipelinesWithAtleastOneReplicaReportedThreshold() { return numPipelinesWithAtleastOneReplicaReportedThreshold; } - public MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { + MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { return currentPipelinesWithAtleastOneReplicaReportedCount; } - public MutableCounterLong getNumContainerWithOneReplicaReportedThreshold() { + MutableGaugeLong getNumContainerWithOneReplicaReportedThreshold() { return numContainerWithOneReplicaReportedThreshold; } - public MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { + MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { return currentContainersWithOneReplicaReportedCount; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java index fcd52d0ebd76..1c1a1c624502 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java @@ -67,7 +67,7 @@ private RootCARotationMetrics(MetricsSystem ms) { this.ms = ms; } - public MutableGaugeLong getSuccessTimeInNs() { + MutableGaugeLong getSuccessTimeInNs() { return successTimeInNs; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index 69f190c7fbd8..79002e27a2e6 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -27,6 +27,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.StringUtils; @@ -73,6 +74,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.IO_EXCEPTION; import static org.apache.hadoop.hdds.scm.net.NetConstants.NODE_COST_DEFAULT; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer; import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName; import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; @@ -203,15 +205,19 @@ public List allocateBlock( AllocatedBlock block = scm.getScmBlockManager() .allocateBlock(size, replicationConfig, owner, excludeList); if (block != null) { - blocks.add(block); // Sort the datanodes if client machine is specified final Node client = getClientNode(clientMachine); if (client != null) { final List nodes = block.getPipeline().getNodes(); final List sorted = scm.getClusterMap() .sortByDistanceCost(client, nodes, nodes.size()); - block.getPipeline().setNodesInOrder(sorted); + if (!Objects.equals(sorted, block.getPipeline().getNodesInOrder())) { + block = block.toBuilder() + .setPipeline(block.getPipeline().copyWithNodesInOrder(sorted)) + .build(); + } } + blocks.add(block); } } @@ -412,6 +418,11 @@ private Node getOtherNode(String clientMachine) { return null; } + @Override + public InnerNode getNetworkTopology() { + return (InnerNode) scm.getClusterMap().getNode(ROOT); + } + @Override public AuditMessage buildAuditMessageForSuccess( AuditAction op, Map auditMap) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 13bef8590b79..47bc66d8331a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -22,7 +22,6 @@ package org.apache.hadoop.hdds.scm.server; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.Maps; import com.google.protobuf.BlockingService; @@ -63,6 +62,7 @@ import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; +import org.apache.hadoop.hdds.scm.FetchMetrics; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -99,6 +99,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -591,7 +592,7 @@ public void deleteContainer(long containerID) throws IOException { @Override public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { try { - 
return scm.getScmDecommissionManager().getContainersReplicatedOnNode(dn); + return scm.getScmDecommissionManager().getContainersPendingReplication(dn); } catch (NodeNotFoundException e) { throw new IOException("Failed to get containers list. Unable to find required node", e); } @@ -645,11 +646,11 @@ public HddsProtos.Node queryNode(UUID uuid) } @Override - public List decommissionNodes(List nodes) + public List decommissionNodes(List nodes, boolean force) throws IOException { try { getScm().checkAdminAccess(getRemoteUser(), false); - return scm.getScmDecommissionManager().decommissionNodes(nodes); + return scm.getScmDecommissionManager().decommissionNodes(nodes, force); } catch (Exception ex) { LOG.error("Failed to decommission nodes", ex); throw ex; @@ -670,11 +671,11 @@ public List recommissionNodes(List nodes) @Override public List startMaintenanceNodes(List nodes, - int endInHours) throws IOException { + int endInHours, boolean force) throws IOException { try { getScm().checkAdminAccess(getRemoteUser(), false); return scm.getScmDecommissionManager() - .startMaintenanceNodes(nodes, endInHours); + .startMaintenanceNodes(nodes, endInHours, force); } catch (Exception ex) { LOG.error("Failed to place nodes into maintenance mode", ex); throw ex; @@ -1046,67 +1047,130 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTarget, - Optional maxSizeLeavingSource) throws IOException { + Optional maxSizeLeavingSource, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { getScm().checkAdminAccess(getRemoteUser(), false); ContainerBalancerConfiguration cbc = scm.getConfiguration().getObject(ContainerBalancerConfiguration.class); Map auditMap = Maps.newHashMap(); - if (threshold.isPresent()) { - double tsd = threshold.get(); - auditMap.put("threshold", String.valueOf(tsd)); - Preconditions.checkState(tsd >= 0.0D && tsd < 100.0D, - "threshold should be specified in range [0.0, 100.0)."); - cbc.setThreshold(tsd); - } - if (maxSizeToMovePerIterationInGB.isPresent()) { - long mstm = maxSizeToMovePerIterationInGB.get(); - auditMap.put("maxSizeToMovePerIterationInGB", String.valueOf(mstm)); - Preconditions.checkState(mstm > 0, - "maxSizeToMovePerIterationInGB must be positive."); - cbc.setMaxSizeToMovePerIteration(mstm * OzoneConsts.GB); - } - if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { - int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); - auditMap.put("maxDatanodesPercentageToInvolvePerIteration", - String.valueOf(mdti)); - Preconditions.checkState(mdti >= 0, - "maxDatanodesPercentageToInvolvePerIteration must be " + - "greater than equal to zero."); - Preconditions.checkState(mdti <= 100, - "maxDatanodesPercentageToInvolvePerIteration must be " + - "lesser than or equal to 100."); - cbc.setMaxDatanodesPercentageToInvolvePerIteration(mdti); - } - if (iterations.isPresent()) { - int i = iterations.get(); - auditMap.put("iterations", String.valueOf(i)); - Preconditions.checkState(i > 0 || i == -1, - "number of iterations must be positive or" + + try { + if (threshold.isPresent()) { + double tsd = threshold.get(); + auditMap.put("threshold", String.valueOf(tsd)); + if (tsd < 0.0D || tsd >= 100.0D) { + throw new IOException("Threshold should be specified in the range [0.0, 100.0)."); + } + 
cbc.setThreshold(tsd); + } + + if (maxSizeToMovePerIterationInGB.isPresent()) { + long mstm = maxSizeToMovePerIterationInGB.get(); + auditMap.put("maxSizeToMovePerIterationInGB", String.valueOf(mstm)); + if (mstm <= 0) { + throw new IOException("Max Size To Move Per Iteration In GB must be positive."); + } + cbc.setMaxSizeToMovePerIteration(mstm * OzoneConsts.GB); + } + + if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { + int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); + auditMap.put("maxDatanodesPercentageToInvolvePerIteration", + String.valueOf(mdti)); + if (mdti < 0 || mdti > 100) { + throw new IOException("Max Datanodes Percentage To Involve Per Iteration" + + "should be specified in the range [0, 100]"); + } + cbc.setMaxDatanodesPercentageToInvolvePerIteration(mdti); + } + + if (iterations.isPresent()) { + int i = iterations.get(); + auditMap.put("iterations", String.valueOf(i)); + if (i < -1 || i == 0) { + throw new IOException("Number of Iterations must be positive or" + " -1 (for running container balancer infinitely)."); - cbc.setIterations(i); - } + } + cbc.setIterations(i); + } - if (maxSizeEnteringTarget.isPresent()) { - long mset = maxSizeEnteringTarget.get(); - auditMap.put("maxSizeEnteringTarget", String.valueOf(mset)); - Preconditions.checkState(mset > 0, - "maxSizeEnteringTarget must be " + + if (maxSizeEnteringTarget.isPresent()) { + long mset = maxSizeEnteringTarget.get(); + auditMap.put("maxSizeEnteringTarget", String.valueOf(mset)); + if (mset <= 0) { + throw new IOException("Max Size Entering Target must be " + "greater than zero."); - cbc.setMaxSizeEnteringTarget(mset * OzoneConsts.GB); - } + } + cbc.setMaxSizeEnteringTarget(mset * OzoneConsts.GB); + } - if (maxSizeLeavingSource.isPresent()) { - long msls = maxSizeLeavingSource.get(); - auditMap.put("maxSizeLeavingSource", String.valueOf(msls)); - Preconditions.checkState(msls > 0, - "maxSizeLeavingSource must be " + + if (maxSizeLeavingSource.isPresent()) { + long msls = maxSizeLeavingSource.get(); + auditMap.put("maxSizeLeavingSource", String.valueOf(msls)); + if (msls <= 0) { + throw new IOException("Max Size Leaving Source must be " + "greater than zero."); - cbc.setMaxSizeLeavingSource(msls * OzoneConsts.GB); - } + } + cbc.setMaxSizeLeavingSource(msls * OzoneConsts.GB); + } - ContainerBalancer containerBalancer = scm.getContainerBalancer(); - try { + if (balancingInterval.isPresent()) { + int bi = balancingInterval.get(); + auditMap.put("balancingInterval", String.valueOf(bi)); + if (bi <= 0) { + throw new IOException("Balancing Interval must be greater than zero."); + } + cbc.setBalancingInterval(Duration.ofMinutes(bi)); + } + + if (moveTimeout.isPresent()) { + int mt = moveTimeout.get(); + auditMap.put("moveTimeout", String.valueOf(mt)); + if (mt <= 0) { + throw new IOException("Move Timeout must be greater than zero."); + } + cbc.setMoveTimeout(Duration.ofMinutes(mt)); + } + + if (moveReplicationTimeout.isPresent()) { + int mrt = moveReplicationTimeout.get(); + auditMap.put("moveReplicationTimeout", String.valueOf(mrt)); + if (mrt <= 0) { + throw new IOException("Move Replication Timeout must be greater than zero."); + } + cbc.setMoveReplicationTimeout(Duration.ofMinutes(mrt)); + } + + if (networkTopologyEnable.isPresent()) { + Boolean nt = networkTopologyEnable.get(); + auditMap.put("networkTopologyEnable", String.valueOf(nt)); + cbc.setNetworkTopologyEnable(nt); + } + + if (includeNodes.isPresent()) { + String in = includeNodes.get(); + auditMap.put("includeNodes", (in)); + 
cbc.setIncludeNodes(in); + } + + if (excludeNodes.isPresent()) { + String ex = excludeNodes.get(); + auditMap.put("excludeNodes", (ex)); + cbc.setExcludeNodes(ex); + } + + ContainerBalancer containerBalancer = scm.getContainerBalancer(); containerBalancer.startBalancer(cbc); + + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + SCMAction.START_CONTAINER_BALANCER, auditMap)); + return StartContainerBalancerResponseProto.newBuilder() + .setStart(true) + .build(); } catch (IllegalContainerBalancerStateException | IOException | InvalidContainerBalancerConfigurationException e) { AUDIT.logWriteFailure(buildAuditMessageForFailure( @@ -1116,11 +1180,6 @@ public StartContainerBalancerResponseProto startContainerBalancer( .setMessage(e.getMessage()) .build(); } - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - SCMAction.START_CONTAINER_BALANCER, auditMap)); - return StartContainerBalancerResponseProto.newBuilder() - .setStart(true) - .build(); } @Override @@ -1373,4 +1432,10 @@ public DecommissionScmResponseProto decommissionScm( } return decommissionScmResponseBuilder.build(); } + + @Override + public String getMetrics(String query) throws IOException { + FetchMetrics fetchMetrics = new FetchMetrics(); + return fetchMetrics.getMetrics(query); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java index d7d47a78b778..dab66cc51543 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java @@ -63,7 +63,6 @@ public void unRegister() { } @Override - @SuppressWarnings("SuspiciousMethodCalls") public void getMetrics(MetricsCollector collector, boolean all) { Map stateCount = scmmxBean.getContainerStateCount(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java index b6dc6f599bd6..484a1e6f0f4b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -213,7 +213,7 @@ public static class ReportFromDatanode { private final DatanodeDetails datanodeDetails; - private final T report; + private T report; public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) { this.datanodeDetails = datanodeDetails; @@ -227,6 +227,10 @@ public DatanodeDetails getDatanodeDetails() { public T getReport() { return report; } + + public void setReport(T report) { + this.report = report; + } } /** @@ -381,9 +385,11 @@ public String getEventId() { @Override public void mergeReport(ContainerReport nextReport) { if (nextReport.getType() == ContainerReportType.ICR) { - getReport().getReportList().addAll( - ((ReportFromDatanode) nextReport) - .getReport().getReportList()); + // To update existing report list , need to create a builder and then + // merge new reports to existing report list. 
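Generated protobuf messages are immutable and their repeated-field getters return unmodifiable views, which is why the merge above goes through toBuilder() instead of mutating getReportList(). A schematic sketch of that pattern; ReportProto and its entry field are hypothetical stand-ins for a generated message, so this fragment will not compile without the corresponding .proto:

// "ReportProto" stands in for a generated protobuf message with a repeated
// field named "entry"; getEntryList() is an unmodifiable view, so merging
// has to round-trip through the builder.
static ReportProto merge(ReportProto current, ReportProto increment) {
  return current.toBuilder()
      .addAllEntry(increment.getEntryList())
      .build();
}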
+ IncrementalContainerReportProto reportProto = getReport().toBuilder().addAllReport( + ((ReportFromDatanode) nextReport).getReport().getReportList()).build(); + setReport(reportProto); } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java index 6054e1af07a6..de609356b225 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java @@ -48,6 +48,13 @@ public interface SCMMXBean extends ServiceRuntimeInfo { */ boolean isInSafeMode(); + + /** + * Returns if safe mode exit is forceful. + * @return boolean + */ + boolean isSafeModeExitForceful(); + /** * Returns live safe mode container threshold. * @return String diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java index 0ea2d0e9559b..9cbd6d97deda 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.hdds.protocol.SecretKeyProtocolDatanode; import org.apache.hadoop.hdds.protocol.SecretKeyProtocolOm; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; @@ -43,6 +44,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_OM_PROTOCOL_ACL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_SCM_PROTOCOL_ACL; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; /** * {@link PolicyProvider} for SCM protocols. 
@@ -85,7 +87,10 @@ public static SCMPolicyProvider getInstance() { SecretKeyProtocolScm.class), new Service( HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL, - SecretKeyProtocolDatanode.class) + SecretKeyProtocolDatanode.class), + new Service( + OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) ); @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 11fdc0d16d79..e86dab5fd721 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -845,7 +845,7 @@ private void initializeSystemManagers(OzoneConfiguration conf, pipelineManager, eventQueue, serviceManager, scmContext); } - scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, + scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, containerManager, scmContext, eventQueue, replicationManager); statefulServiceStateManager = StatefulServiceStateManagerImpl.newBuilder() @@ -1796,6 +1796,10 @@ public void shutDown(String message) { ExitUtils.terminate(0, message, LOG); } + public boolean isStopped() { + return isStopped.get(); + } + /** * Wait until service has completed shutdown. */ @@ -1979,6 +1983,11 @@ public boolean isInSafeMode() { return scmSafeModeManager.getInSafeMode(); } + @Override + public boolean isSafeModeExitForceful() { + return scmSafeModeManager.isForceExitSafeMode(); + } + /** * Returns EventPublisher. */ @@ -2011,7 +2020,7 @@ public SCMServiceManager getSCMServiceManager() { * Force SCM out of safe mode. */ public boolean exitSafeMode() { - scmSafeModeManager.exitSafeMode(eventQueue); + scmSafeModeManager.exitSafeMode(eventQueue, true); return true; } diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index fdd8de15b6a9..be110c9cc564 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -124,6 +124,10 @@

Status

Node Manager: Safe mode status {{$ctrl.overview.jmx.InSafeMode}} + + Force Exit Safe Mode + {{$ctrl.overview.jmx.SafeModeExitForceful}} + SCM Roles (HA) {{$ctrl.overview.jmx.ScmRatisRoles}} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 84f3684ab7cc..21c3f1c9a8ab 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; -import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap; import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java similarity index 63% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java index 5269a7aaeb3e..507eb75c5d78 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java @@ -16,37 +16,47 @@ * */ -package org.apache.hadoop.hdds.scm.node.states; +package org.apache.hadoop.hdds.scm.container; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.states.ReportResult; -import java.util.UUID; -import java.util.Set; +import java.util.Collections; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.TreeSet; -import java.util.HashSet; -import java.util.Collections; - +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE; /** * This data structure maintains the list of containers that is on a datanode. * This information is built from the DN container reports. */ -public class Node2ObjectsMap { +class Node2ContainerMap { + private final Map> dn2ContainerMap = new ConcurrentHashMap<>(); - @SuppressWarnings("visibilitymodifier") - protected final Map> dn2ObjectMap; /** * Constructs a Node2ContainerMap Object. */ - public Node2ObjectsMap() { - dn2ObjectMap = new ConcurrentHashMap<>(); + Node2ContainerMap() { + super(); + } + + /** + * Returns null if there no containers associated with this datanode ID. + * + * @param datanode - UUID + * @return Set of containers or Null. + */ + public @Nonnull Set getContainers(@Nonnull UUID datanode) { + final Set s = dn2ContainerMap.get(datanode); + return s != null ? 
new HashSet<>(s) : Collections.emptySet(); } /** @@ -56,9 +66,8 @@ public Node2ObjectsMap() { * @param datanodeID - UUID of the Datanode. * @return True if this is tracked, false if this map does not know about it. */ - public boolean isKnownDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - return dn2ObjectMap.containsKey(datanodeID); + public boolean isKnownDatanode(@Nonnull UUID datanodeID) { + return dn2ContainerMap.containsKey(datanodeID); } /** @@ -67,15 +76,10 @@ public boolean isKnownDatanode(UUID datanodeID) { * @param datanodeID -- Datanode UUID * @param containerIDs - List of ContainerIDs. */ - @VisibleForTesting - public void insertNewDatanode(UUID datanodeID, Set containerIDs) + public void insertNewDatanode(@Nonnull UUID datanodeID, @Nonnull Set containerIDs) throws SCMException { - Preconditions.checkNotNull(containerIDs); - Preconditions.checkNotNull(datanodeID); - if (dn2ObjectMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) - != null) { - throw new SCMException("Node already exists in the map", - DUPLICATE_DATANODE); + if (dn2ContainerMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) != null) { + throw new SCMException("Node already exists in the map", DUPLICATE_DATANODE); } } @@ -84,32 +88,15 @@ public void insertNewDatanode(UUID datanodeID, Set containerIDs) * * @param datanodeID - Datanode ID. */ - @VisibleForTesting - public void removeDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - dn2ObjectMap.computeIfPresent(datanodeID, (k, v) -> null); + public void removeDatanode(@Nonnull UUID datanodeID) { + dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> null); } - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - Set getObjects(UUID datanode) { - Preconditions.checkNotNull(datanode); - final Set s = dn2ObjectMap.get(datanode); - return s != null ? new HashSet<>(s) : Collections.emptySet(); - } - - public ReportResult.ReportResultBuilder newBuilder() { + public @Nonnull ReportResult.ReportResultBuilder newBuilder() { return new ReportResult.ReportResultBuilder<>(); } - public ReportResult processReport(UUID datanodeID, Set objects) { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(objects); - + public @Nonnull ReportResult processReport(@Nonnull UUID datanodeID, @Nonnull Set objects) { if (!isKnownDatanode(datanodeID)) { return newBuilder() .setStatus(ReportResult.ReportStatus.NEW_DATANODE_FOUND) @@ -118,11 +105,11 @@ public ReportResult processReport(UUID datanodeID, Set objects) { } // Conditions like Zero length containers should be handled by removeAll. - Set currentSet = dn2ObjectMap.get(datanodeID); - TreeSet newObjects = new TreeSet<>(objects); + Set currentSet = dn2ContainerMap.get(datanodeID); + TreeSet newObjects = new TreeSet<>(objects); newObjects.removeAll(currentSet); - TreeSet missingObjects = new TreeSet<>(currentSet); + TreeSet missingObjects = new TreeSet<>(currentSet); missingObjects.removeAll(objects); if (newObjects.isEmpty() && missingObjects.isEmpty()) { @@ -159,8 +146,22 @@ public ReportResult processReport(UUID datanodeID, Set objects) { .build(); } - @VisibleForTesting + /** + * Updates the Container list of an existing DN. + * + * @param datanodeID - UUID of DN. + * @param containers - Set of Containers tht is present on DN. + * @throws SCMException - if we don't know about this datanode, for new DN + * use addDatanodeInContainerMap. 
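The insert/update split in this map leans on two ConcurrentHashMap primitives: putIfAbsent gives insert-only semantics (fails if the datanode is already tracked), while computeIfPresent gives update-only semantics (fails if it is not). A compact standalone sketch of the same split; the class name and exception types are illustrative, not the Ozone ones:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

// Minimal stand-in for the datanode -> container-set bookkeeping.
class DatanodeContainerIndex {
  private final Map<UUID, Set<Long>> map = new ConcurrentHashMap<>();

  /** Insert-only: rejects a datanode that is already tracked. */
  void addDatanode(UUID dn, Set<Long> containers) {
    if (map.putIfAbsent(dn, new HashSet<>(containers)) != null) {
      throw new IllegalStateException("Node already exists in the map");
    }
  }

  /** Update-only: rejects a datanode that has never been added. */
  void setContainers(UUID dn, Set<Long> containers) {
    if (map.computeIfPresent(dn, (k, v) -> new HashSet<>(containers)) == null) {
      throw new IllegalStateException("No such datanode");
    }
  }

  /** Defensive copy so callers cannot mutate internal state. */
  Set<Long> getContainers(UUID dn) {
    Set<Long> s = map.get(dn);
    return s == null ? new HashSet<>() : new HashSet<>(s);
  }
}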
+ */ + public void setContainersForDatanode(@Nonnull UUID datanodeID, @Nonnull Set containers) + throws SCMException { + if (dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) == null) { + throw new SCMException("No such datanode", NO_SUCH_DATANODE); + } + } + public int size() { - return dn2ObjectMap.size(); + return dn2ContainerMap.size(); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java similarity index 99% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java index 0aab0aeca837..92e0a2c494f5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java @@ -17,10 +17,10 @@ * */ -package org.apache.hadoop.hdds.scm.node.states; +package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.states.ReportResult; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -32,9 +32,9 @@ import java.util.concurrent.ConcurrentHashMap; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertFalse; /** * Test classes for Node2ContainerMap. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java new file mode 100644 index 000000000000..a3ec55d58639 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java @@ -0,0 +1,319 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +import com.google.protobuf.ByteString; +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.PlacementPolicy; +import org.apache.hadoop.hdds.scm.PlacementPolicyValidateProxy; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.container.ContainerReplicaNotFoundException; +import org.apache.hadoop.hdds.scm.container.MockNodeManager; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMService; +import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; +import org.apache.hadoop.hdds.scm.ha.StatefulServiceStateManager; +import org.apache.hadoop.hdds.scm.ha.StatefulServiceStateManagerImpl; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.mockito.Mockito; + +import java.io.IOException; +import java.time.Clock; +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeoutException; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Class for test used for setting up testable StorageContainerManager. 
+ * Provides an access to {@link TestableCluster} and to necessary mocked instances + */ +public final class MockedSCM { + private final StorageContainerManager scm; + private final TestableCluster cluster; + private final MockNodeManager mockNodeManager; + private final MockedReplicationManager mockedReplicaManager; + private final MoveManager moveManager; + private final ContainerManager containerManager; + private MockedPlacementPolicies mockedPlacementPolicies; + + public MockedSCM(@Nonnull TestableCluster testableCluster) { + scm = mock(StorageContainerManager.class); + cluster = testableCluster; + mockNodeManager = new MockNodeManager(cluster.getDatanodeToContainersMap()); + try { + moveManager = mockMoveManager(); + containerManager = mockContainerManager(cluster); + mockedReplicaManager = MockedReplicationManager.doMock(); + } catch (NodeNotFoundException | ContainerReplicaNotFoundException | ContainerNotFoundException | + TimeoutException e + ) { + throw new RuntimeException("Can't create MockedSCM instance: ", e); + } + } + + private void init(@Nonnull ContainerBalancerConfiguration balancerConfig, @Nonnull OzoneConfiguration ozoneCfg) { + ozoneCfg.setFromObject(balancerConfig); + try { + doMock(balancerConfig, ozoneCfg); + } catch (IOException | NodeNotFoundException | TimeoutException e) { + throw new RuntimeException("Can't create MockedSCM instance: ", e); + } + } + + /** + * Mock some instances that will be used for MockedStorageContainerManager. + */ + private void doMock(@Nonnull ContainerBalancerConfiguration cfg, @Nonnull OzoneConfiguration ozoneCfg) + throws IOException, NodeNotFoundException, TimeoutException { + StatefulServiceStateManager stateManager = MockedServiceStateManager.doMock(); + SCMServiceManager scmServiceManager = mockSCMServiceManger(); + + mockedPlacementPolicies = MockedPlacementPolicies.doMock(ozoneCfg, mockNodeManager); + + when(scm.getConfiguration()).then(invocationOnMock -> { + ozoneCfg.setFromObject(cfg); + return ozoneCfg; + }); + when(scm.getMoveManager()).thenReturn(moveManager); + when(scm.getScmNodeManager()).thenReturn(mockNodeManager); + when(scm.getContainerManager()).thenReturn(containerManager); + when(scm.getReplicationManager()).thenReturn(mockedReplicaManager.manager); + when(scm.getContainerPlacementPolicy()).thenReturn(mockedPlacementPolicies.placementPolicy); + when(scm.getPlacementPolicyValidateProxy()).thenReturn(mockedPlacementPolicies.validateProxyPolicy); + when(scm.getSCMServiceManager()).thenReturn(scmServiceManager); + when(scm.getScmContext()).thenReturn(SCMContext.emptyContext()); + when(scm.getClusterMap()).thenReturn(null); + when(scm.getEventQueue()).thenReturn(mock(EventPublisher.class)); + when(scm.getStatefulServiceStateManager()).thenReturn(stateManager); + } + + @Override + public String toString() { + return cluster.toString(); + } + + public @Nonnull ContainerBalancerTask startBalancerTask( + @Nonnull ContainerBalancer containerBalancer, + @Nonnull ContainerBalancerConfiguration config + ) { + ContainerBalancerTask task = new ContainerBalancerTask(scm, 0, containerBalancer, + containerBalancer.getMetrics(), config, false); + task.run(); + return task; + } + + public @Nonnull ContainerBalancerTask startBalancerTask(@Nonnull ContainerBalancerConfiguration config) { + init(config, new OzoneConfiguration()); + return startBalancerTask(new ContainerBalancer(scm), config); + } + + public void enableLegacyReplicationManager() { + mockedReplicaManager.conf.setEnableLegacy(true); + } + + public void 
disableLegacyReplicationManager() { + mockedReplicaManager.conf.setEnableLegacy(false); + } + + public @Nonnull MoveManager getMoveManager() { + return moveManager; + } + + public @Nonnull ReplicationManager getReplicationManager() { + return mockedReplicaManager.manager; + } + + public @Nonnull MockNodeManager getNodeManager() { + return mockNodeManager; + } + + public @Nonnull StorageContainerManager getStorageContainerManager() { + return scm; + } + + public @Nonnull TestableCluster getCluster() { + return cluster; + } + + public @Nonnull ContainerManager getContainerManager() { + return containerManager; + } + + public @Nonnull PlacementPolicy getPlacementPolicy() { + return mockedPlacementPolicies.placementPolicy; + } + + public @Nonnull PlacementPolicy getEcPlacementPolicy() { + return mockedPlacementPolicies.ecPlacementPolicy; + } + + private static @Nonnull ContainerManager mockContainerManager(@Nonnull TestableCluster cluster) + throws ContainerNotFoundException { + ContainerManager containerManager = mock(ContainerManager.class); + Mockito + .when(containerManager.getContainerReplicas(any(ContainerID.class))) + .thenAnswer(invocationOnMock -> { + ContainerID cid = (ContainerID) invocationOnMock.getArguments()[0]; + return cluster.getCidToReplicasMap().get(cid); + }); + + Mockito + .when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocationOnMock -> { + ContainerID cid = (ContainerID) invocationOnMock.getArguments()[0]; + return cluster.getCidToInfoMap().get(cid); + }); + + Mockito + .when(containerManager.getContainers()) + .thenReturn(new ArrayList<>(cluster.getCidToInfoMap().values())); + return containerManager; + } + + private static @Nonnull SCMServiceManager mockSCMServiceManger() { + SCMServiceManager scmServiceManager = mock(SCMServiceManager.class); + + Mockito + .doNothing() + .when(scmServiceManager) + .register(Mockito.any(SCMService.class)); + + return scmServiceManager; + } + + private static @Nonnull MoveManager mockMoveManager() + throws NodeNotFoundException, ContainerReplicaNotFoundException, ContainerNotFoundException { + MoveManager moveManager = mock(MoveManager.class); + Mockito + .when(moveManager.move( + any(ContainerID.class), + any(DatanodeDetails.class), + any(DatanodeDetails.class))) + .thenReturn(CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED)); + return moveManager; + } + + private static final class MockedReplicationManager { + private final ReplicationManager manager; + private final ReplicationManager.ReplicationManagerConfiguration conf; + + private MockedReplicationManager() { + manager = mock(ReplicationManager.class); + conf = new ReplicationManager.ReplicationManagerConfiguration(); + // Disable LegacyReplicationManager. This means balancer should select RATIS as well as + // EC containers for balancing. Also, MoveManager will be used. 
+ conf.setEnableLegacy(false); + } + + private static @Nonnull MockedReplicationManager doMock() + throws NodeNotFoundException, ContainerNotFoundException, TimeoutException { + MockedReplicationManager mockedManager = new MockedReplicationManager(); + + Mockito + .when(mockedManager.manager.getConfig()) + .thenReturn(mockedManager.conf); + + Mockito + .when(mockedManager.manager.isContainerReplicatingOrDeleting(Mockito.any(ContainerID.class))) + .thenReturn(false); + + Mockito + .when(mockedManager.manager.move( + Mockito.any(ContainerID.class), + Mockito.any(DatanodeDetails.class), + Mockito.any(DatanodeDetails.class))) + .thenReturn(CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED)); + + Mockito + .when(mockedManager.manager.getClock()) + .thenReturn(Clock.system(ZoneId.systemDefault())); + + return mockedManager; + } + } + + private static final class MockedServiceStateManager { + private final Map serviceToConfigMap = new HashMap<>(); + private final StatefulServiceStateManager serviceStateManager = Mockito.mock(StatefulServiceStateManagerImpl.class); + + private static @Nonnull StatefulServiceStateManager doMock() throws IOException { + MockedServiceStateManager manager = new MockedServiceStateManager(); + + // When StatefulServiceStateManager#saveConfiguration is called, save to in-memory serviceToConfigMap instead. + Map map = manager.serviceToConfigMap; + StatefulServiceStateManager stateManager = manager.serviceStateManager; + Mockito + .doAnswer(i -> { + map.put(i.getArgument(0, String.class), i.getArgument(1, ByteString.class)); + return null; + }) + .when(stateManager) + .saveConfiguration(Mockito.any(String.class), Mockito.any(ByteString.class)); + + // When StatefulServiceStateManager#readConfiguration is called, read from serviceToConfigMap instead. 
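The mocked state manager above backs its save and read calls with a plain map, and the same doAnswer/thenAnswer technique generalizes to any mock that should behave like a tiny in-memory store. A self-contained sketch assuming only Mockito; the KeyValueStore interface is invented for illustration and is not the real StatefulServiceStateManager API:

import java.util.HashMap;
import java.util.Map;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class InMemoryStubExample {
  // Illustrative interface; not an Ozone API.
  interface KeyValueStore {
    void save(String key, byte[] value);
    byte[] read(String key);
  }

  static KeyValueStore inMemoryMock() {
    Map<String, byte[]> backing = new HashMap<>();
    KeyValueStore store = mock(KeyValueStore.class);

    // Route the void save(...) call into the backing map.
    doAnswer(invocation -> {
      backing.put(invocation.getArgument(0, String.class),
                  invocation.getArgument(1, byte[].class));
      return null;
    }).when(store).save(anyString(), any());

    // Serve reads from the same map.
    when(store.read(anyString()))
        .thenAnswer(invocation ->
            backing.get(invocation.getArgument(0, String.class)));

    return store;
  }
}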
+ Mockito + .when(stateManager.readConfiguration(Mockito.anyString())) + .thenAnswer(i -> map.get(i.getArgument(0, String.class))); + return stateManager; + } + } + + private static final class MockedPlacementPolicies { + private final PlacementPolicy placementPolicy; + private final PlacementPolicy ecPlacementPolicy; + private final PlacementPolicyValidateProxy validateProxyPolicy; + + private MockedPlacementPolicies(@Nonnull PlacementPolicy placementPolicy, @Nonnull PlacementPolicy ecPolicy) { + this.placementPolicy = placementPolicy; + ecPlacementPolicy = ecPolicy; + validateProxyPolicy = new PlacementPolicyValidateProxy(this.placementPolicy, ecPlacementPolicy); + } + + private static @Nonnull MockedPlacementPolicies doMock( + @Nonnull OzoneConfiguration ozoneConfig, + @Nonnull NodeManager nodeManager + ) throws SCMException { + NetworkTopology clusterMap = nodeManager.getClusterNetworkTopologyMap(); + PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy( + ozoneConfig, nodeManager, clusterMap, true, SCMContainerPlacementMetrics.create()); + PlacementPolicy ecPolicy = ContainerPlacementPolicyFactory.getECPolicy( + ozoneConfig, nodeManager, clusterMap, true, SCMContainerPlacementMetrics.create()); + return new MockedPlacementPolicies(policy, ecPolicy); + } + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java new file mode 100644 index 000000000000..35804795cc52 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java @@ -0,0 +1,347 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0
+ *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.event.Level; + +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.scm.container.balancer.TestableCluster.RANDOM; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Tests for {@link ContainerBalancerTask} moved from {@link TestContainerBalancerTask} to run them on clusters + * with different datanode count. 
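The test class added below drives the same assertions across many cluster sizes through JUnit 5's @MethodSource. A stripped-down sketch of that wiring; nodeCounts() and the assertion body are placeholders rather than the real mocked-balancer setup:

import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import static org.junit.jupiter.api.Assertions.assertTrue;

class ClusterSizeParameterizedExample {

  // Factory method referenced by name from @MethodSource; must be static.
  static Stream<Arguments> nodeCounts() {
    return IntStream.of(4, 10, 15, 30).mapToObj(n -> Arguments.of(n));
  }

  @ParameterizedTest(name = "cluster with {0} datanodes")
  @MethodSource("nodeCounts")
  void behavesTheSameOnAnyClusterSize(int nodeCount) {
    // Placeholder assertion; a real test would build a mocked cluster of
    // nodeCount datanodes and run the balancer task against it.
    assertTrue(nodeCount > 0);
  }
}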
+ */ +public class TestContainerBalancerDatanodeNodeLimit { + private static final long STORAGE_UNIT = OzoneConsts.GB; + private static final int DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER = 15; + + @BeforeAll + public static void setup() { + GenericTestUtils.setLogLevel(ContainerBalancerTask.LOG, Level.DEBUG); + } + + private static Stream createMockedSCMs() { + return Stream.of( + Arguments.of(getMockedSCM(4)), + Arguments.of(getMockedSCM(5)), + Arguments.of(getMockedSCM(6)), + Arguments.of(getMockedSCM(7)), + Arguments.of(getMockedSCM(8)), + Arguments.of(getMockedSCM(9)), + Arguments.of(getMockedSCM(10)), + Arguments.of(getMockedSCM(11)), + Arguments.of(getMockedSCM(12)), + Arguments.of(getMockedSCM(13)), + Arguments.of(getMockedSCM(14)), + Arguments.of(getMockedSCM(15)), + Arguments.of(getMockedSCM(17)), + Arguments.of(getMockedSCM(19)), + Arguments.of(getMockedSCM(20)), + Arguments.of(getMockedSCM(30))); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit(@Nonnull MockedSCM mockedSCM) { + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + int nodeCount = mockedSCM.getCluster().getNodeCount(); + if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + } + config.setIterations(1); + config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + ContainerBalancerMetrics metrics = task.getMetrics(); + + int maxDatanodePercentage = config.getMaxDatanodesPercentageToInvolvePerIteration(); + int number = maxDatanodePercentage * nodeCount / 100; + int datanodesInvolvedPerIteration = task.getCountDatanodesInvolvedPerIteration(); + assertThat(datanodesInvolvedPerIteration).isGreaterThan(0); + assertThat(datanodesInvolvedPerIteration).isLessThanOrEqualTo(number); + long numDatanodesInvolvedInLatestIteration = metrics.getNumDatanodesInvolvedInLatestIteration(); + assertThat(numDatanodesInvolvedInLatestIteration).isGreaterThan(0); + assertThat(numDatanodesInvolvedInLatestIteration).isLessThanOrEqualTo(number); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void balancerShouldObeyMaxSizeEnteringTargetLimit(@Nonnull MockedSCM mockedSCM) { + OzoneConfiguration ozoneConfig = new OzoneConfiguration(); + ozoneConfig.set("ozone.scm.container.size", "1MB"); + ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig); + if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + } + config.setIterations(1); + config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + // No containers should be selected when the limit is just 2 MB. + config.setMaxSizeEnteringTarget(2 * OzoneConsts.MB); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + // Container balancer still has unbalanced nodes due to MaxSizeEnteringTarget limit + assertTrue(stillHaveUnbalancedNodes(task)); + // ContainerToSourceMap is empty due to MaxSizeEnteringTarget limit + assertTrue(task.getContainerToSourceMap().isEmpty()); + // SizeScheduledForMoveInLatestIteration equals to 0 because there are no containers was selected + assertEquals(0, task.getSizeScheduledForMoveInLatestIteration()); + + // Some containers should be selected when using default values. 
+    ContainerBalancerConfiguration balancerConfig = balancerConfigByOzoneConfig(new OzoneConfiguration());
+    if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) {
+      balancerConfig.setMaxDatanodesPercentageToInvolvePerIteration(100);
+    }
+    balancerConfig.setIterations(1);
+
+    task = mockedSCM.startBalancerTask(balancerConfig);
+    // Balancer should have identified unbalanced nodes.
+    assertTrue(stillHaveUnbalancedNodes(task));
+    // ContainerToSourceMap is not empty because some containers should have been selected.
+    assertFalse(task.getContainerToSourceMap().isEmpty());
+    // SizeScheduledForMoveInLatestIteration is non-zero because some containers should have been selected.
+    assertNotEquals(0, task.getSizeScheduledForMoveInLatestIteration());
+  }
+
+  @ParameterizedTest(name = "MockedSCM #{index}: {0}")
+  @MethodSource("createMockedSCMs")
+  public void balancerShouldObeyMaxSizeLeavingSourceLimit(@Nonnull MockedSCM mockedSCM) {
+    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
+    ozoneConfig.set("ozone.scm.container.size", "1MB");
+    ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig);
+    if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) {
+      config.setMaxDatanodesPercentageToInvolvePerIteration(100);
+    }
+    config.setIterations(1);
+    config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT);
+    config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT);
+    // No source containers should be selected when the limit is just 2 MB.
+    config.setMaxSizeLeavingSource(2 * OzoneConsts.MB);
+
+    ContainerBalancerTask task = mockedSCM.startBalancerTask(config);
+    // Container balancer still has unbalanced nodes because of the MaxSizeLeavingSource limit.
+    assertTrue(stillHaveUnbalancedNodes(task));
+    // ContainerToSourceMap is empty because of the MaxSizeLeavingSource limit.
+    assertTrue(task.getContainerToSourceMap().isEmpty());
+    // SizeScheduledForMoveInLatestIteration is 0 because no containers were selected.
+    assertEquals(0, task.getSizeScheduledForMoveInLatestIteration());
+
+    // Some containers should be selected when using default values.
+    ContainerBalancerConfiguration newBalancerConfig = balancerConfigByOzoneConfig(new OzoneConfiguration());
+    if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) {
+      newBalancerConfig.setMaxDatanodesPercentageToInvolvePerIteration(100);
+    }
+    newBalancerConfig.setIterations(1);
+
+    task = mockedSCM.startBalancerTask(newBalancerConfig);
+    // Balancer should have identified unbalanced nodes.
+    assertTrue(stillHaveUnbalancedNodes(task));
+    // ContainerToSourceMap is not empty because some containers should have been selected.
+    assertFalse(task.getContainerToSourceMap().isEmpty());
+    // SizeScheduledForMoveInLatestIteration is non-zero because some containers should have been selected.
+    assertNotEquals(0, task.getSizeScheduledForMoveInLatestIteration());
+  }
+
+  /**
+   * Checks whether ContainerBalancer is correctly updating the list of
+   * unBalanced nodes with varying values of Threshold.
+ */ + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void initializeIterationShouldUpdateUnBalancedNodesWhenThresholdChanges(@Nonnull MockedSCM mockedSCM) { + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + int nodeCount = mockedSCM.getCluster().getNodeCount(); + if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + } + config.setThreshold(10); + config.setIterations(1); + config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + + // check for random threshold values + for (int i = 0; i < 50; i++) { + double randomThreshold = RANDOM.nextDouble() * 100; + List expectedUnBalancedNodes = mockedSCM.getCluster().getUnBalancedNodes(randomThreshold); + config.setThreshold(randomThreshold); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + List unBalancedNodesAccordingToBalancer = getUnBalancedNodes(task); + + assertEquals(expectedUnBalancedNodes.size(), unBalancedNodesAccordingToBalancer.size()); + + for (int j = 0; j < expectedUnBalancedNodes.size(); j++) { + assertEquals(expectedUnBalancedNodes.get(j).getDatanodeDetails(), + unBalancedNodesAccordingToBalancer.get(j).getDatanodeDetails()); + } + } + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void testCalculationOfUtilization(@Nonnull MockedSCM mockedSCM) { + TestableCluster cluster = mockedSCM.getCluster(); + DatanodeUsageInfo[] nodesInCluster = cluster.getNodesInCluster(); + double[] nodeUtilizations = cluster.getNodeUtilizationList(); + assertEquals(nodesInCluster.length, nodeUtilizations.length); + for (int i = 0; i < nodesInCluster.length; i++) { + assertEquals(nodeUtilizations[i], nodesInCluster[i].calculateUtilization(), 0.0001); + } + + // should be equal to average utilization of the cluster + assertEquals(cluster.getAverageUtilization(), + ContainerBalancerTask.calculateAvgUtilization(Arrays.asList(nodesInCluster)), 0.0001); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void testBalancerWithMoveManager(@Nonnull MockedSCM mockedSCM) + throws IOException, NodeNotFoundException, TimeoutException { + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + int nodeCount = mockedSCM.getCluster().getNodeCount(); + if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + } + config.setThreshold(10); + config.setIterations(1); + config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + + mockedSCM.disableLegacyReplicationManager(); + mockedSCM.startBalancerTask(config); + + verify(mockedSCM.getMoveManager(), atLeastOnce()) + .move(any(ContainerID.class), + any(DatanodeDetails.class), + any(DatanodeDetails.class)); + + verify(mockedSCM.getReplicationManager(), times(0)) + .move(any(ContainerID.class), any( + DatanodeDetails.class), any(DatanodeDetails.class)); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced(@Nonnull MockedSCM mockedSCM) { + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + int nodeCount = 
mockedSCM.getCluster().getNodeCount(); + if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + } + config.setThreshold(10); + config.setIterations(1); + config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + config.setThreshold(99.99); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + ContainerBalancerMetrics metrics = task.getMetrics(); + assertEquals(0, getUnBalancedNodes(task).size()); + assertEquals(0, metrics.getNumDatanodesUnbalanced()); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void testMetrics(@Nonnull MockedSCM mockedSCM) throws IOException, NodeNotFoundException { + OzoneConfiguration ozoneConfig = new OzoneConfiguration(); + ozoneConfig.set("hdds.datanode.du.refresh.period", "1ms"); + ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig); + int nodeCount = mockedSCM.getCluster().getNodeCount(); + if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + } + config.setBalancingInterval(Duration.ofMillis(2)); + config.setThreshold(10); + config.setIterations(1); + config.setMaxSizeEnteringTarget(6 * STORAGE_UNIT); + // deliberately set max size per iteration to a low value, 6 GB + config.setMaxSizeToMovePerIteration(6 * STORAGE_UNIT); + + when(mockedSCM.getMoveManager().move(any(), any(), any())) + .thenReturn(CompletableFuture.completedFuture(MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY)) + .thenReturn(CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED)); + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + + ContainerBalancerMetrics metrics = task.getMetrics(); + assertEquals(mockedSCM.getCluster().getUnBalancedNodes(config.getThreshold()).size(), + metrics.getNumDatanodesUnbalanced()); + assertThat(metrics.getDataSizeMovedGBInLatestIteration()).isLessThanOrEqualTo(6); + assertThat(metrics.getDataSizeMovedGB()).isGreaterThan(0); + assertEquals(1, metrics.getNumIterations()); + assertThat(metrics.getNumContainerMovesScheduledInLatestIteration()).isGreaterThan(0); + assertEquals(metrics.getNumContainerMovesScheduled(), metrics.getNumContainerMovesScheduledInLatestIteration()); + assertEquals(metrics.getNumContainerMovesScheduled(), + metrics.getNumContainerMovesCompleted() + + metrics.getNumContainerMovesFailed() + + metrics.getNumContainerMovesTimeout()); + assertEquals(0, metrics.getNumContainerMovesTimeout()); + assertEquals(1, metrics.getNumContainerMovesFailed()); + } + + + public static List getUnBalancedNodes(@Nonnull ContainerBalancerTask task) { + ArrayList result = new ArrayList<>(); + result.addAll(task.getOverUtilizedNodes()); + result.addAll(task.getUnderUtilizedNodes()); + return result; + } + + private static boolean stillHaveUnbalancedNodes(@Nonnull ContainerBalancerTask task) { + return !getUnBalancedNodes(task).isEmpty(); + } + + public static @Nonnull MockedSCM getMockedSCM(int datanodeCount) { + return new MockedSCM(new TestableCluster(datanodeCount, STORAGE_UNIT)); + } + + private static @Nonnull ContainerBalancerConfiguration balancerConfigByOzoneConfig( + @Nonnull OzoneConfiguration ozoneConfiguration + ) { + return ozoneConfiguration.getObject(ContainerBalancerConfiguration.class); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 3bed3878123d..0f4551b45c24 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -55,6 +55,7 @@ import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -76,7 +77,6 @@ import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotSame; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -88,7 +88,6 @@ import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; @@ -115,7 +114,6 @@ public class TestContainerBalancerTask { private ContainerBalancerConfiguration balancerConfiguration; private List nodesInCluster; private List nodeUtilizations; - private double averageUtilization; private int numberOfNodes; private Map> cidToReplicasMap = new HashMap<>(); @@ -132,7 +130,7 @@ public class TestContainerBalancerTask { * Sets up configuration values and creates a mock cluster. */ @BeforeEach - public void setup() throws IOException, NodeNotFoundException, + public void setup(TestInfo testInfo) throws IOException, NodeNotFoundException, TimeoutException { conf = new OzoneConfiguration(); rmConf = new ReplicationManagerConfiguration(); @@ -164,7 +162,11 @@ public void setup() throws IOException, NodeNotFoundException, conf.setFromObject(balancerConfiguration); GenericTestUtils.setLogLevel(ContainerBalancerTask.LOG, Level.DEBUG); - averageUtilization = createCluster(); + int[] sizeArray = testInfo.getTestMethod() + .filter(method -> method.getName().equals("balancerShouldMoveOnlyPositiveSizeContainers")) + .map(method -> new int[]{0, 0, 0, 0, 0, 1, 2, 3, 4, 5}) + .orElse(null); + createCluster(sizeArray); mockNodeManager = new MockNodeManager(datanodeToContainersMap); NetworkTopology clusterMap = mockNodeManager.getClusterNetworkTopologyMap(); @@ -246,114 +248,6 @@ public void setup() throws IOException, NodeNotFoundException, sb.getMetrics(), balancerConfiguration, false); } - @Test - public void testCalculationOfUtilization() { - assertEquals(nodesInCluster.size(), nodeUtilizations.size()); - for (int i = 0; i < nodesInCluster.size(); i++) { - assertEquals(nodeUtilizations.get(i), - nodesInCluster.get(i).calculateUtilization(), 0.0001); - } - - // should be equal to average utilization of the cluster - assertEquals(averageUtilization, containerBalancerTask.calculateAvgUtilization(nodesInCluster), 0.0001); - } - - /** - * Checks whether ContainerBalancer is correctly updating the list of - * unBalanced nodes with varying values of Threshold. 
- */ - @Test - public void - initializeIterationShouldUpdateUnBalancedNodesWhenThresholdChanges() { - List expectedUnBalancedNodes; - List unBalancedNodesAccordingToBalancer; - - // check for random threshold values - ContainerBalancer sb = new ContainerBalancer(scm); - for (int i = 0; i < 50; i++) { - double randomThreshold = RANDOM.nextDouble() * 100; - - expectedUnBalancedNodes = - determineExpectedUnBalancedNodes(randomThreshold); - - balancerConfiguration.setThreshold(randomThreshold); - containerBalancerTask = new ContainerBalancerTask(scm, 0, sb, - sb.getMetrics(), balancerConfiguration, false); - containerBalancerTask.run(); - - unBalancedNodesAccordingToBalancer = - containerBalancerTask.getUnBalancedNodes(); - - assertEquals(expectedUnBalancedNodes.size(), unBalancedNodesAccordingToBalancer.size()); - - for (int j = 0; j < expectedUnBalancedNodes.size(); j++) { - assertEquals(expectedUnBalancedNodes.get(j).getDatanodeDetails(), - unBalancedNodesAccordingToBalancer.get(j).getDatanodeDetails()); - } - } - } - - @Test - public void testBalancerWithMoveManager() - throws IllegalContainerBalancerStateException, IOException, - InvalidContainerBalancerConfigurationException, TimeoutException, - NodeNotFoundException { - rmConf.setEnableLegacy(false); - startBalancer(balancerConfiguration); - verify(moveManager, atLeastOnce()) - .move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class)); - - verify(replicationManager, times(0)) - .move(any(ContainerID.class), any( - DatanodeDetails.class), any(DatanodeDetails.class)); - } - - /** - * Checks whether the list of unBalanced nodes is empty when the cluster is - * balanced. - */ - @Test - public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced() - throws IllegalContainerBalancerStateException, IOException, - InvalidContainerBalancerConfigurationException, TimeoutException { - balancerConfiguration.setThreshold(99.99); - startBalancer(balancerConfiguration); - - - stopBalancer(); - ContainerBalancerMetrics metrics = containerBalancerTask.getMetrics(); - assertEquals(0, containerBalancerTask.getUnBalancedNodes().size()); - assertEquals(0, metrics.getNumDatanodesUnbalanced()); - } - - /** - * ContainerBalancer should not involve more datanodes than the - * maxDatanodesRatioToInvolvePerIteration limit. 
- */ - @Test - public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit() - throws IllegalContainerBalancerStateException, IOException, - InvalidContainerBalancerConfigurationException, TimeoutException { - int percent = 40; - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration( - percent); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setThreshold(1); - balancerConfiguration.setIterations(1); - startBalancer(balancerConfiguration); - - int number = percent * numberOfNodes / 100; - ContainerBalancerMetrics metrics = containerBalancerTask.getMetrics(); - assertThat(containerBalancerTask.getCountDatanodesInvolvedPerIteration()) - .isLessThanOrEqualTo(number); - assertThat(metrics.getNumDatanodesInvolvedInLatestIteration()).isGreaterThan(0); - assertThat(metrics.getNumDatanodesInvolvedInLatestIteration()) - .isLessThanOrEqualTo(number); - stopBalancer(); - } - @Test public void containerBalancerShouldSelectOnlyClosedContainers() throws IllegalContainerBalancerStateException, IOException, @@ -367,8 +261,7 @@ public void containerBalancerShouldSelectOnlyClosedContainers() stopBalancer(); // balancer should have identified unbalanced nodes - assertFalse(containerBalancerTask.getUnBalancedNodes() - .isEmpty()); + assertFalse(TestContainerBalancerDatanodeNodeLimit.getUnBalancedNodes(containerBalancerTask).isEmpty()); // no container should have been selected assertTrue(containerBalancerTask.getContainerToSourceMap() .isEmpty()); @@ -429,8 +322,7 @@ public void balancerShouldNotSelectNonClosedContainerReplicas() stopBalancer(); // balancer should have identified unbalanced nodes - assertFalse(containerBalancerTask.getUnBalancedNodes() - .isEmpty()); + assertFalse(TestContainerBalancerDatanodeNodeLimit.getUnBalancedNodes(containerBalancerTask).isEmpty()); // no container should have moved because all replicas are CLOSING assertTrue( containerBalancerTask.getContainerToSourceMap().isEmpty()); @@ -609,126 +501,6 @@ public void balancerShouldNotSelectConfiguredExcludeContainers() } } - @Test - public void balancerShouldObeyMaxSizeEnteringTargetLimit() - throws IllegalContainerBalancerStateException, IOException, - InvalidContainerBalancerConfigurationException, TimeoutException { - conf.set("ozone.scm.container.size", "1MB"); - balancerConfiguration = - conf.getObject(ContainerBalancerConfiguration.class); - balancerConfiguration.setThreshold(10); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - balancerConfiguration.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - - // no containers should be selected when the limit is just 2 MB - balancerConfiguration.setMaxSizeEnteringTarget(2 * OzoneConsts.MB); - startBalancer(balancerConfiguration); - - assertFalse(containerBalancerTask.getUnBalancedNodes() - .isEmpty()); - assertTrue(containerBalancerTask.getContainerToSourceMap() - .isEmpty()); - stopBalancer(); - - // some containers should be selected when using default values - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ContainerBalancerConfiguration cbc = ozoneConfiguration. 
- getObject(ContainerBalancerConfiguration.class); - cbc.setBalancingInterval(1); - ContainerBalancer sb = new ContainerBalancer(scm); - containerBalancerTask = new ContainerBalancerTask(scm, 0, sb, - sb.getMetrics(), cbc, false); - containerBalancerTask.run(); - - stopBalancer(); - // balancer should have identified unbalanced nodes - assertFalse(containerBalancerTask.getUnBalancedNodes() - .isEmpty()); - assertFalse(containerBalancerTask.getContainerToSourceMap() - .isEmpty()); - } - - @Test - public void balancerShouldObeyMaxSizeLeavingSourceLimit() - throws IllegalContainerBalancerStateException, IOException, - InvalidContainerBalancerConfigurationException, TimeoutException { - conf.set("ozone.scm.container.size", "1MB"); - balancerConfiguration = - conf.getObject(ContainerBalancerConfiguration.class); - balancerConfiguration.setThreshold(10); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - balancerConfiguration.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - - // no source containers should be selected when the limit is just 2 MB - balancerConfiguration.setMaxSizeLeavingSource(2 * OzoneConsts.MB); - startBalancer(balancerConfiguration); - - assertFalse(containerBalancerTask.getUnBalancedNodes() - .isEmpty()); - assertTrue(containerBalancerTask.getContainerToSourceMap() - .isEmpty()); - stopBalancer(); - - // some containers should be selected when using default values - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ContainerBalancerConfiguration cbc = ozoneConfiguration. - getObject(ContainerBalancerConfiguration.class); - cbc.setBalancingInterval(1); - ContainerBalancer sb = new ContainerBalancer(scm); - containerBalancerTask = new ContainerBalancerTask(scm, 0, sb, - sb.getMetrics(), cbc, false); - containerBalancerTask.run(); - - stopBalancer(); - // balancer should have identified unbalanced nodes - assertFalse(containerBalancerTask.getUnBalancedNodes() - .isEmpty()); - assertFalse(containerBalancerTask.getContainerToSourceMap() - .isEmpty()); - assertNotEquals(0, - containerBalancerTask.getSizeScheduledForMoveInLatestIteration()); - } - - @Test - public void testMetrics() - throws IllegalContainerBalancerStateException, IOException, - InvalidContainerBalancerConfigurationException, TimeoutException, - NodeNotFoundException { - conf.set("hdds.datanode.du.refresh.period", "1ms"); - balancerConfiguration.setBalancingInterval(Duration.ofMillis(2)); - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(6 * STORAGE_UNIT); - // deliberately set max size per iteration to a low value, 6 GB - balancerConfiguration.setMaxSizeToMovePerIteration(6 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - when(moveManager.move(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture( - MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY)) - .thenReturn(CompletableFuture.completedFuture( - MoveManager.MoveResult.COMPLETED)); - - startBalancer(balancerConfiguration); - stopBalancer(); - - ContainerBalancerMetrics metrics = containerBalancerTask.getMetrics(); - assertEquals(determineExpectedUnBalancedNodes( - balancerConfiguration.getThreshold()).size(), - metrics.getNumDatanodesUnbalanced()); - assertThat(metrics.getDataSizeMovedGBInLatestIteration()).isLessThanOrEqualTo(6); - assertThat(metrics.getDataSizeMovedGB()).isGreaterThan(0); - assertEquals(1, metrics.getNumIterations()); - 
assertThat(metrics.getNumContainerMovesScheduledInLatestIteration()).isGreaterThan(0); - assertEquals(metrics.getNumContainerMovesScheduled(), - metrics.getNumContainerMovesScheduledInLatestIteration()); - assertEquals(metrics.getNumContainerMovesScheduled(), - metrics.getNumContainerMovesCompleted() + - metrics.getNumContainerMovesFailed() + - metrics.getNumContainerMovesTimeout()); - assertEquals(0, metrics.getNumContainerMovesTimeout()); - assertEquals(1, metrics.getNumContainerMovesFailed()); - } - /** * Tests if {@link ContainerBalancer} follows the includeNodes and * excludeNodes configurations in {@link ContainerBalancerConfiguration}. @@ -1115,32 +887,68 @@ public void balancerShouldExcludeECContainersWhenLegacyRmIsEnabled() } /** - * Determines unBalanced nodes, that is, over and under utilized nodes, - * according to the generated utilization values for nodes and the threshold. - * - * @param threshold A percentage in the range 0 to 100 - * @return List of DatanodeUsageInfo containing the expected(correct) - * unBalanced nodes. + * Tests if balancer is adding the polled source datanode back to potentialSources queue + * if a move has failed due to a container related failure, like REPLICATION_FAIL_NOT_EXIST_IN_SOURCE. */ - private List determineExpectedUnBalancedNodes( - double threshold) { - threshold /= 100; - double lowerLimit = averageUtilization - threshold; - double upperLimit = averageUtilization + threshold; - - // use node utilizations to determine over and under utilized nodes - List expectedUnBalancedNodes = new ArrayList<>(); - for (int i = 0; i < numberOfNodes; i++) { - if (nodeUtilizations.get(numberOfNodes - i - 1) > upperLimit) { - expectedUnBalancedNodes.add(nodesInCluster.get(numberOfNodes - i - 1)); - } - } - for (int i = 0; i < numberOfNodes; i++) { - if (nodeUtilizations.get(i) < lowerLimit) { - expectedUnBalancedNodes.add(nodesInCluster.get(i)); + @Test + public void testSourceDatanodeAddedBack() + throws NodeNotFoundException, IOException, IllegalContainerBalancerStateException, + InvalidContainerBalancerConfigurationException, TimeoutException, InterruptedException { + + when(moveManager.move(any(ContainerID.class), + any(DatanodeDetails.class), + any(DatanodeDetails.class))) + .thenReturn(CompletableFuture.completedFuture(MoveManager.MoveResult.REPLICATION_FAIL_NOT_EXIST_IN_SOURCE)) + .thenReturn(CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED)); + balancerConfiguration.setThreshold(10); + balancerConfiguration.setIterations(1); + balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); + balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); + String includeNodes = nodesInCluster.get(0).getDatanodeDetails().getHostName() + "," + + nodesInCluster.get(nodesInCluster.size() - 1).getDatanodeDetails().getHostName(); + balancerConfiguration.setIncludeNodes(includeNodes); + + startBalancer(balancerConfiguration); + GenericTestUtils.waitFor(() -> ContainerBalancerTask.IterationResult.ITERATION_COMPLETED == + containerBalancerTask.getIterationResult(), 10, 50); + + assertEquals(2, containerBalancerTask.getCountDatanodesInvolvedPerIteration()); + assertTrue(containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration() >= 1); + assertThat(containerBalancerTask.getMetrics().getNumContainerMovesFailed()).isEqualTo(1); + assertTrue(containerBalancerTask.getSelectedTargets().contains(nodesInCluster.get(0) + 
.getDatanodeDetails())); + assertTrue(containerBalancerTask.getSelectedSources().contains(nodesInCluster.get(nodesInCluster.size() - 1) + .getDatanodeDetails())); + stopBalancer(); + } + + /** + * Test to check if balancer picks up only positive size + * containers to move from source to destination. + */ + @Test + public void balancerShouldMoveOnlyPositiveSizeContainers() + throws IllegalContainerBalancerStateException, IOException, + InvalidContainerBalancerConfigurationException, TimeoutException { + + startBalancer(balancerConfiguration); + /* + Get all containers that were selected by balancer and assert none of + them is a zero or negative size container. + */ + Map containerToSource = + containerBalancerTask.getContainerToSourceMap(); + assertFalse(containerToSource.isEmpty()); + boolean zeroOrNegSizeContainerMoved = false; + for (Map.Entry entry : + containerToSource.entrySet()) { + ContainerInfo containerInfo = cidToInfoMap.get(entry.getKey()); + if (containerInfo.getUsedBytes() <= 0) { + zeroOrNegSizeContainerMoved = true; } } - return expectedUnBalancedNodes; + assertFalse(zeroOrNegSizeContainerMoved); } /** @@ -1169,10 +977,9 @@ private void generateUtilizations(int count) throws IllegalArgumentException { * cluster have utilization values determined by generateUtilizations method. * @return average utilization (used space / capacity) of the cluster */ - private double createCluster() { - generateData(); + private void createCluster(int[] sizeArray) { + generateData(sizeArray); createReplicasForContainers(); - long clusterCapacity = 0, clusterUsedSpace = 0; // for each node utilization, calculate that datanode's used space and // capacity @@ -1195,16 +1002,13 @@ private double createCluster() { datanodeCapacity - datanodeUsedSpace, 0, datanodeCapacity - datanodeUsedSpace - 1); nodesInCluster.get(i).setScmNodeStat(stat); - clusterUsedSpace += datanodeUsedSpace; - clusterCapacity += datanodeCapacity; } - return (double) clusterUsedSpace / clusterCapacity; } /** * Create some datanodes and containers for each node. */ - private void generateData() { + private void generateData(int[] sizeArray) { this.numberOfNodes = 10; generateUtilizations(numberOfNodes); nodesInCluster = new ArrayList<>(nodeUtilizations.size()); @@ -1216,13 +1020,19 @@ private void generateData() { new DatanodeUsageInfo(MockDatanodeDetails.randomDatanodeDetails(), new SCMNodeStat()); - // create containers with varying used space int sizeMultiple = 0; + if (sizeArray == null) { + sizeArray = new int[10]; + for (int j = 0; j < numberOfNodes; j++) { + sizeArray[j] = sizeMultiple; + sizeMultiple %= 5; + sizeMultiple++; + } + } + // create containers with varying used space for (int j = 0; j < i; j++) { - sizeMultiple %= 5; - sizeMultiple++; ContainerInfo container = - createContainer((long) i * i + j, sizeMultiple); + createContainer((long) i * i + j, sizeArray[j]); cidToInfoMap.put(container.containerID(), container); containerIDSet.add(container.containerID()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java new file mode 100644 index 000000000000..a565fac0e3d6 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java @@ -0,0 +1,257 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; + +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; + +/** + * Class is used for creating test cluster with a required number of datanodes. + * 1. Fill the cluster by generating some data. + * 2. Nodes in the cluster have utilization values determined by generateUtilization method. + */ +public final class TestableCluster { + static final ThreadLocalRandom RANDOM = ThreadLocalRandom.current(); + private static final Logger LOG = LoggerFactory.getLogger(TestableCluster.class); + private final int nodeCount; + private final double[] nodeUtilizationList; + private final DatanodeUsageInfo[] nodesInCluster; + private final Map cidToInfoMap = new HashMap<>(); + private final Map> cidToReplicasMap = new HashMap<>(); + private final Map> dnUsageToContainersMap = new HashMap<>(); + private final double averageUtilization; + + TestableCluster(int numberOfNodes, long storageUnit) { + nodeCount = numberOfNodes; + nodeUtilizationList = createUtilizationList(nodeCount); + nodesInCluster = new DatanodeUsageInfo[nodeCount]; + + generateData(storageUnit); + createReplicasForContainers(); + long clusterCapacity = 0, clusterUsedSpace = 0; + + // For each node utilization, calculate that datanode's used space and capacity. + for (int i = 0; i < nodeUtilizationList.length; i++) { + Set containerIDSet = dnUsageToContainersMap.get(nodesInCluster[i]); + long datanodeUsedSpace = 0; + for (ContainerID containerID : containerIDSet) { + datanodeUsedSpace += cidToInfoMap.get(containerID).getUsedBytes(); + } + // Use node utilization and used space to determine node capacity. + long datanodeCapacity = (nodeUtilizationList[i] == 0) + ? 
storageUnit * RANDOM.nextInt(10, 60) + : (long) (datanodeUsedSpace / nodeUtilizationList[i]); + + SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace, + datanodeCapacity - datanodeUsedSpace, 0, + datanodeCapacity - datanodeUsedSpace - 1); + nodesInCluster[i].setScmNodeStat(stat); + clusterUsedSpace += datanodeUsedSpace; + clusterCapacity += datanodeCapacity; + } + + averageUtilization = (double) clusterUsedSpace / clusterCapacity; + } + + @Override + public String toString() { + return "cluster of " + nodeCount + " nodes"; + } + + @Nonnull Map> getDatanodeToContainersMap() { + return dnUsageToContainersMap; + } + + @Nonnull Map getCidToInfoMap() { + return cidToInfoMap; + } + + int getNodeCount() { + return nodeCount; + } + + double getAverageUtilization() { + return averageUtilization; + } + + @Nonnull DatanodeUsageInfo[] getNodesInCluster() { + return nodesInCluster; + } + + double[] getNodeUtilizationList() { + return nodeUtilizationList; + } + + @Nonnull Map> getCidToReplicasMap() { + return cidToReplicasMap; + } + + /** + * Determines unBalanced nodes, that is, over and under utilized nodes, + * according to the generated utilization values for nodes and the threshold. + * + * @param threshold a percentage in the range 0 to 100 + * @return list of DatanodeUsageInfo containing the expected(correct) unBalanced nodes. + */ + @Nonnull List getUnBalancedNodes(double threshold) { + threshold /= 100; + double lowerLimit = averageUtilization - threshold; + double upperLimit = averageUtilization + threshold; + + // Use node utilization to determine over and under utilized nodes. + List expectedUnBalancedNodes = new ArrayList<>(); + for (int i = 0; i < nodeCount; i++) { + if (nodeUtilizationList[nodeCount - i - 1] > upperLimit) { + expectedUnBalancedNodes.add(nodesInCluster[nodeCount - i - 1]); + } + } + for (int i = 0; i < nodeCount; i++) { + if (nodeUtilizationList[i] < lowerLimit) { + expectedUnBalancedNodes.add(nodesInCluster[i]); + } + } + return expectedUnBalancedNodes; + } + + /** + * Create some datanodes and containers for each node. + */ + private void generateData(long storageUnit) { + // Create datanodes and add containers to them. + for (int i = 0; i < nodeCount; i++) { + DatanodeUsageInfo usageInfo = + new DatanodeUsageInfo(MockDatanodeDetails.randomDatanodeDetails(), new SCMNodeStat()); + nodesInCluster[i] = usageInfo; + + // Create containers with varying used space. + Set containerIDSet = new HashSet<>(); + int sizeMultiple = 0; + for (int j = 0; j < i; j++) { + sizeMultiple %= 5; + sizeMultiple++; + ContainerInfo container = createContainer((long) i * i + j, storageUnit * sizeMultiple); + + cidToInfoMap.put(container.containerID(), container); + containerIDSet.add(container.containerID()); + + // Create initial replica for this container and add it. + Set containerReplicaSet = new HashSet<>(); + containerReplicaSet.add( + createReplica(container.containerID(), usageInfo.getDatanodeDetails(), container.getUsedBytes())); + cidToReplicasMap.put(container.containerID(), containerReplicaSet); + } + dnUsageToContainersMap.put(usageInfo, containerIDSet); + } + } + + private @Nonnull ContainerInfo createContainer(long id, long usedBytes) { + ContainerInfo.Builder builder = new ContainerInfo.Builder() + .setContainerID(id) + .setState(HddsProtos.LifeCycleState.CLOSED) + .setOwner("TestContainerBalancer") + .setUsedBytes(usedBytes); + + // Make it a RATIS container if id is even, else make it an EC container. + ReplicationConfig config = (id % 2 == 0) + ? 
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE) + : new ECReplicationConfig(3, 2); + + builder.setReplicationConfig(config); + return builder.build(); + } + + /** + * Create the required number of replicas for each container. Note that one replica already exists and + * nodes with utilization value 0 should not have any replicas. + */ + private void createReplicasForContainers() { + for (ContainerInfo container : cidToInfoMap.values()) { + // One replica already exists; create the remaining ones. + ReplicationConfig replicationConfig = container.getReplicationConfig(); + ContainerID key = container.containerID(); + for (int i = 0; i < replicationConfig.getRequiredNodes() - 1; i++) { + // Randomly pick a datanode for this replica. + int dnIndex = RANDOM.nextInt(0, nodeCount); + // Don't put replicas in DNs that are supposed to have 0 utilization. + if (Math.abs(nodeUtilizationList[dnIndex] - 0.0d) > 0.00001) { + DatanodeDetails node = nodesInCluster[dnIndex].getDatanodeDetails(); + Set replicas = cidToReplicasMap.get(key); + replicas.add(createReplica(key, node, container.getUsedBytes())); + cidToReplicasMap.put(key, replicas); + dnUsageToContainersMap.get(nodesInCluster[dnIndex]).add(key); + } + } + } + } + + /** + * Generates a range of equally spaced utilization(that is, used / capacity) values from 0 to 1. + * + * @param count Number of values to generate. Count must be greater than or equal to 1. + * @return double array of node utilization values + * @throws IllegalArgumentException If the value of the parameter count is less than 1. + */ + private static double[] createUtilizationList(int count) throws IllegalArgumentException { + if (count < 1) { + LOG.warn("The value of argument count is {}. However, count must be greater than 0.", count); + throw new IllegalArgumentException(); + } + double[] result = new double[count]; + for (int i = 0; i < count; i++) { + result[i] = (i / (double) count); + } + return result; + } + + private @Nonnull ContainerReplica createReplica( + @Nonnull ContainerID containerID, + @Nonnull DatanodeDetails datanodeDetails, + long usedBytes + ) { + return ContainerReplica.newBuilder() + .setContainerID(containerID) + .setContainerState(ContainerReplicaProto.State.CLOSED) + .setDatanodeDetails(datanodeDetails) + .setOriginNodeId(datanodeDetails.getUuid()) + .setSequenceId(1000L) + .setBytesUsed(usedBytes) + .build(); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java index a97cdbddb8af..3775531d30d1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.ozone.test.TestClock; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -74,6 +75,13 @@ public void setup() { dn3 = MockDatanodeDetails.randomDatanodeDetails(); } + @AfterEach + void cleanup() { + if (metrics != null) { + metrics.unRegister(); + } + } + @Test public void testGetPendingOpsReturnsEmptyList() { List ops = diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java index 22c3630e0c6b..f4edb8a4280a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.assertj.core.util.Lists; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -82,7 +83,6 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -174,6 +174,13 @@ public NodeStatus getNodeStatus(DatanodeDetails dd) { .thenReturn(new ContainerPlacementStatusDefault(2, 2, 3)); } + @AfterEach + void cleanup() { + if (metrics != null) { + metrics.unRegister(); + } + } + @ParameterizedTest @ValueSource(strings = {"rs-6-3-1024k", "rs-10-4-1024k"}) void defersNonCriticalPartialReconstruction(String rep) throws IOException { @@ -1163,10 +1170,6 @@ public void testMaintenanceIndexCopiedWhenContainerUnRecoverable() result, remainingMaintenanceRedundancy); int replicateCommand = 0; int reconstructCommand = 0; - byte[] missingIndexesByteArr = new byte[missingIndexes.size()]; - for (int i = 0; i < missingIndexes.size(); i++) { - missingIndexesByteArr[i] = missingIndexes.get(i).byteValue(); - } boolean shouldReconstructCommandExist = missingIndexes.size() > 0 && missingIndexes.size() <= repConfig .getParity(); @@ -1176,7 +1179,7 @@ public void testMaintenanceIndexCopiedWhenContainerUnRecoverable() } else if (dnCommand .getValue() instanceof ReconstructECContainersCommand) { if (shouldReconstructCommandExist) { - assertArrayEquals(missingIndexesByteArr, + assertEquals(ECUnderReplicationHandler.integers2ByteString(missingIndexes), ((ReconstructECContainersCommand) dnCommand.getValue()) .getMissingContainerIndexes()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index 47844f32fb0d..568eba57154e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.container.replication; import com.google.common.collect.ImmutableList; +import com.google.protobuf.Proto2Utils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -55,6 +56,7 @@ import org.apache.hadoop.util.Lists; 
import org.apache.ozone.test.TestClock; import org.apache.ratis.protocol.exceptions.NotLeaderException; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -190,6 +192,13 @@ public void setup() throws IOException { when(scmContext.getScm()).thenReturn(scm); } + @AfterEach + void cleanup() { + if (replicationManager.getMetrics() != null) { + replicationManager.getMetrics().unRegister(); + } + } + private ReplicationManager createReplicationManager() throws IOException { return new ReplicationManager( configuration, @@ -1273,7 +1282,7 @@ public void testSendDatanodeReconstructCommand() throws NotLeaderException { ReconstructECContainersCommand command = new ReconstructECContainersCommand( containerInfo.getContainerID(), sourceNodes, targetNodes, - missingIndexes, ecRepConfig); + Proto2Utils.unsafeByteString(missingIndexes), ecRepConfig); replicationManager.sendDatanodeCommand(command, containerInfo, target4); @@ -1600,7 +1609,7 @@ private ReconstructECContainersCommand createReconstructionCommand( byte[] missingIndexes = new byte[]{4, 5}; return new ReconstructECContainersCommand( containerInfo.getContainerID(), sources, - new ArrayList<>(Arrays.asList(targets)), missingIndexes, + Arrays.asList(targets), Proto2Utils.unsafeByteString(missingIndexes), (ECReplicationConfig) repConfig); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestX509CertificateCodec.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestX509CertificateCodec.java index a6307916a193..952984db7b20 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestX509CertificateCodec.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestX509CertificateCodec.java @@ -19,6 +19,7 @@ import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Proto2Utils; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.junit.jupiter.api.Test; @@ -55,7 +56,7 @@ public void codec() throws Exception { public void testCodecError() { X509CertificateCodec x509CertificateCodec = new X509CertificateCodec(); - ByteString byteString = ByteString.copyFrom("dummy".getBytes(UTF_8)); + final ByteString byteString = Proto2Utils.unsafeByteString("dummy".getBytes(UTF_8)); assertThrows(InvalidProtocolBufferException.class, () -> x509CertificateCodec.deserialize(X509Certificate.class, byteString)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index b241ac0f2d28..f3a303cad738 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -186,7 +186,7 @@ public void testContainerPlacementCapacity() throws IOException, testDir.getAbsolutePath()); conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); - conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); SCMNodeManager scmNodeManager = createNodeManager(conf); containerManager = createContainerManager(); diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 06565e1b7e5a..5c04ad63210e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -864,8 +864,8 @@ public void testContainersReplicatedOnDecomDnAPI() assertEquals(1, monitor.getTrackedNodeCount()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dn1).getOperationalState()); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 2); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 0); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnderReplicated").size(), 2); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnClosed").size(), 0); DatanodeAdminMonitorTestUtil .mockGetContainerReplicaCount(repManager, @@ -877,8 +877,8 @@ public void testContainersReplicatedOnDecomDnAPI() assertEquals(1, monitor.getTrackedNodeCount()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dn1).getOperationalState()); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 0); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 2); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnderReplicated").size(), 0); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnClosed").size(), 2); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java new file mode 100644 index 000000000000..ede005745e5e --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.node; + +import org.apache.hadoop.hdds.scm.FetchMetrics; +import org.junit.jupiter.api.Test; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +class TestFetchMetrics { + private static FetchMetrics fetchMetrics = new FetchMetrics(); + + @Test + public void testFetchAll() { + String result = fetchMetrics.getMetrics(null); + Pattern p = Pattern.compile("beans", Pattern.MULTILINE); + Matcher m = p.matcher(result); + assertTrue(m.find()); + } + + @Test + public void testFetchFiltered() { + String result = fetchMetrics.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + Pattern p = Pattern.compile("beans", Pattern.MULTILINE); + Matcher m = p.matcher(result); + assertTrue(m.find()); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 09f0dd59b9f9..4511ffea5d2e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -18,20 +18,27 @@ package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.DatanodeAdminError; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mockito; import java.io.File; import java.io.IOException; @@ -39,13 +46,21 @@ import java.util.UUID; import java.util.Arrays; import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; import static java.util.Collections.singletonList; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import 
static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Unit tests for the decommission manager. @@ -56,15 +71,42 @@ public class TestNodeDecommissionManager { private NodeDecommissionManager decom; private StorageContainerManager scm; private NodeManager nodeManager; + private ContainerManager containerManager; private OzoneConfiguration conf; + private static int id = 1; @BeforeEach void setup(@TempDir File dir) throws Exception { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); - nodeManager = createNodeManager(conf); - decom = new NodeDecommissionManager(conf, nodeManager, + scm = HddsTestUtils.getScm(conf); + nodeManager = scm.getScmNodeManager(); + containerManager = mock(ContainerManager.class); + decom = new NodeDecommissionManager(conf, nodeManager, containerManager, SCMContext.emptyContext(), new EventQueue(), null); + when(containerManager.allocateContainer(any(ReplicationConfig.class), anyString())) + .thenAnswer(invocation -> createMockContainer((ReplicationConfig)invocation.getArguments()[0], + (String) invocation.getArguments()[1])); + } + + private ContainerInfo createMockContainer(ReplicationConfig rep, String owner) { + ContainerInfo.Builder builder = new ContainerInfo.Builder() + .setReplicationConfig(rep) + .setContainerID(id) + .setPipelineID(PipelineID.randomId()) + .setState(OPEN) + .setOwner(owner); + id++; + return builder.build(); + } + private ContainerInfo getMockContainer(ReplicationConfig rep, ContainerID conId) { + ContainerInfo.Builder builder = new ContainerInfo.Builder() + .setReplicationConfig(rep) + .setContainerID(conId.getId()) + .setPipelineID(PipelineID.randomId()) + .setState(OPEN) + .setOwner("admin"); + return builder.build(); } @Test @@ -99,37 +141,37 @@ public void testAnyInvalidHostThrowsException() { // Try to decommission a host that does exist, but give incorrect port List error = decom.decommissionNodes( - singletonList(dns.get(1).getIpAddress() + ":10")); + singletonList(dns.get(1).getIpAddress() + ":10"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(1).getIpAddress()); // Try to decommission a host that does not exist - error = decom.decommissionNodes(singletonList("123.123.123.123")); + error = decom.decommissionNodes(singletonList("123.123.123.123"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains("123.123.123.123"); // Try to decommission a host that does exist and a host that does not error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - "123,123,123,123")); + "123,123,123,123"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains("123,123,123,123"); // Try to decommission a host with many DNs on the address with no port - error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress())); + error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress()), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(0).getIpAddress()); // Try to decommission a host with many DNs on the address with a port // that does not exist error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress() - + ":10")); + + ":10"), false); assertEquals(1, 
error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(0).getIpAddress() + ":10"); // Try to decommission 2 hosts with address that does not exist // Both should return error error = decom.decommissionNodes(Arrays.asList( - "123.123.123.123", "234.234.234.234")); + "123.123.123.123", "234.234.234.234"), false); assertEquals(2, error.size()); assertTrue(error.get(0).getHostname().contains("123.123.123.123") && error.get(1).getHostname().contains("234.234.234.234")); @@ -142,7 +184,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // Decommission 2 valid nodes decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - dns.get(2).getIpAddress())); + dns.get(2).getIpAddress()), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, @@ -151,14 +193,14 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // Running the command again gives no error - nodes already decommissioning // are silently ignored. decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - dns.get(2).getIpAddress())); + dns.get(2).getIpAddress()), false); // Attempt to decommission dn(10) which has multiple hosts on the same IP // and we hardcoded ports to 3456, 4567, 5678 DatanodeDetails multiDn = dns.get(10); String multiAddr = multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue(); - decom.decommissionNodes(singletonList(multiAddr)); + decom.decommissionNodes(singletonList(multiAddr), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(multiDn).getOperationalState()); @@ -166,7 +208,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // dn(11) with identical ports. nodeManager.processHeartbeat(dns.get(9)); DatanodeDetails duplicatePorts = dns.get(9); - decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress())); + decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress()), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(duplicatePorts).getOperationalState()); @@ -217,13 +259,13 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts() // Attempt to decommission with just the IP, which should fail. 
List error = - decom.decommissionNodes(singletonList(extraDN.getIpAddress())); + decom.decommissionNodes(singletonList(extraDN.getIpAddress()), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(extraDN.getIpAddress()); // Now try the one with the unique port decom.decommissionNodes( - singletonList(extraDN.getIpAddress() + ":" + ratisPort + 1)); + singletonList(extraDN.getIpAddress() + ":" + ratisPort + 1), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(extraDN).getOperationalState()); @@ -239,7 +281,7 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts() nodeManager.processHeartbeat(expectedDN); decom.decommissionNodes(singletonList( - expectedDN.getIpAddress() + ":" + ratisPort)); + expectedDN.getIpAddress() + ":" + ratisPort), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(expectedDN).getOperationalState()); // The other duplicate is still in service @@ -260,7 +302,7 @@ public void testNodesCanBePutIntoMaintenanceAndRecommissioned() // Put 2 valid nodes into maintenance decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), - dns.get(2).getIpAddress()), 100); + dns.get(2).getIpAddress()), 100, true); assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertNotEquals(0, nodeManager.getNodeStatus( @@ -273,14 +315,14 @@ public void testNodesCanBePutIntoMaintenanceAndRecommissioned() // Running the command again gives no error - nodes already decommissioning // are silently ignored. decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), - dns.get(2).getIpAddress()), 100); + dns.get(2).getIpAddress()), 100, true); // Attempt to decommission dn(10) which has multiple hosts on the same IP // and we hardcoded ports to 3456, 4567, 5678 DatanodeDetails multiDn = dns.get(10); String multiAddr = multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue(); - decom.startMaintenanceNodes(singletonList(multiAddr), 100); + decom.startMaintenanceNodes(singletonList(multiAddr), 100, true); assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, nodeManager.getNodeStatus(multiDn).getOperationalState()); @@ -289,7 +331,7 @@ public void testNodesCanBePutIntoMaintenanceAndRecommissioned() nodeManager.processHeartbeat(dns.get(9)); DatanodeDetails duplicatePorts = dns.get(9); decom.startMaintenanceNodes(singletonList(duplicatePorts.getIpAddress()), - 100); + 100, true); assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, nodeManager.getNodeStatus(duplicatePorts).getOperationalState()); @@ -323,14 +365,14 @@ public void testNodesCannotTransitionFromDecomToMaint() throws Exception { // Try to go from maint to decom: List dn = new ArrayList<>(); dn.add(dns.get(1).getIpAddress()); - List errors = decom.decommissionNodes(dn); + List errors = decom.decommissionNodes(dn, false); assertEquals(1, errors.size()); assertEquals(dns.get(1).getHostName(), errors.get(0).getHostname()); // Try to go from decom to maint: dn = new ArrayList<>(); dn.add(dns.get(2).getIpAddress()); - errors = decom.startMaintenanceNodes(dn, 100); + errors = decom.startMaintenanceNodes(dn, 100, true); assertEquals(1, errors.size()); assertEquals(dns.get(2).getHostName(), errors.get(0).getHostname()); @@ -369,10 +411,623 @@ public void testNodeDecommissionManagerOnBecomeLeader() throws Exception { assertEquals(decom.getMonitor().getTrackedNodes().size(), 3); } 
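The new tests below exercise the force flag added to decommissionNodes and startMaintenanceNodes: without it, a request that would leave too few IN_SERVICE datanodes for the existing Ratis/EC containers is rejected with one aggregated error under the pseudo-hostname "AllHosts". A minimal caller-side sketch of that contract, assuming only what the tests show; the DatanodeAdminError element type and the helper name are assumptions, since the flattened diff text does not show the generic parameters:

  // Sketch only: how a caller might use the new force flag. NodeDecommissionManager and the
  // "AllHosts" aggregated error come from the tests in this patch; the DatanodeAdminError
  // element type is assumed here, as the diff text drops generic type parameters.
  static void decommissionWithOptionalForce(NodeDecommissionManager decom,
      List<String> hosts, boolean allowForce) throws IOException {
    List<DatanodeAdminError> errors = decom.decommissionNodes(hosts, false);
    if (allowForce && !errors.isEmpty()
        && errors.get(0).getHostname().contains("AllHosts")) {
      // Too few IN_SERVICE nodes would remain; retry with force=true to bypass the safety check.
      errors = decom.decommissionNodes(hosts, true);
    }
    if (!errors.isEmpty()) {
      throw new IOException("Failed to decommission: " + errors);
    }
  }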
- private SCMNodeManager createNodeManager(OzoneConfiguration config) - throws IOException, AuthenticationException { - scm = HddsTestUtils.getScm(config); - return (SCMNodeManager) scm.getScmNodeManager(); + @Test + public void testInsufficientNodeDecommissionThrowsExceptionForRatis() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionThrowsExceptionForEc() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsEC = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer(new ECReplicationConfig(3, 2), "admin"); + idsEC.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + nodeManager.setContainers(dn, idsEC); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); + 
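  // For reference (an inference from this test's setup, not wording from the patch): the
  // EC(3, 2) containers are assigned to all five registered datanodes, so taking even one
  // node out of service leaves 4 in-service nodes where 3 data + 2 parity = 5 are needed.
  // The non-forced call above is therefore rejected with the single aggregated "AllHosts"
  // error, while the forced call later in this test proceeds.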
assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionThrowsExceptionRatisAndEc() throws + NodeNotFoundException, IOException { + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + ContainerInfo containerRatis = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(containerRatis.containerID()); + Set idsEC = new HashSet<>(); + ContainerInfo containerEC = containerManager.allocateContainer(new ECReplicationConfig(3, 2), "admin"); + idsEC.add(containerEC.containerID()); + + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> { + ContainerID containerID = (ContainerID)invocation.getArguments()[0]; + if (idsEC.contains(containerID)) { + return getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0]); + } + return getMockContainer(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + (ContainerID)invocation.getArguments()[0]); + }); + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + nodeManager.setContainers(dn, idsEC); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionChecksNotInService() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + + // decommission one node successfully + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress()), false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + 
nodeManager.getNodeStatus(dns.get(0)).getOperationalState()); + // try to decommission 2 nodes, one in service and one in decommissioning state, should be successful. + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(), + dns.get(1).getIpAddress()), false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(0)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionChecksForNNF() throws + NodeNotFoundException, IOException { + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + } + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 3; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + nodeManager = mock(NodeManager.class); + decom = new NodeDecommissionManager(conf, nodeManager, containerManager, + SCMContext.emptyContext(), new EventQueue(), null); + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + when(nodeManager.getNodesByAddress(any())).thenAnswer(invocation -> + getDatanodeDetailsList((String)invocation.getArguments()[0], dns)); + when(nodeManager.getContainers(any())).thenReturn(idsRatis); + when(nodeManager.getNodeCount(any())).thenReturn(5); + + when(nodeManager.getNodeStatus(any())).thenAnswer(invocation -> + getNodeOpState((DatanodeDetails) invocation.getArguments()[0], dns)); + Mockito.doAnswer(invocation -> { + setNodeOpState((DatanodeDetails)invocation.getArguments()[0], + (HddsProtos.NodeOperationalState)invocation.getArguments()[1], dns); + return null; + }).when(nodeManager).setNodeOperationalState(any(DatanodeDetails.class), any( + HddsProtos.NodeOperationalState.class)); + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(), + dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), false); + assertFalse(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + } + + @Test + public void testInsufficientNodeMaintenanceThrowsExceptionForRatis() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + 
.getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + + decom.setMaintenanceConfigs(2, 1); // default config + // putting 4 DNs into maintenance leave the cluster with 1 DN, + // it should not be allowed as maintenance.replica.minimum is 2 + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), 100, false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + // putting 3 DNs into maintenance leave the cluster with 2 DN, + // it should be allowed + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress()), 100, false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + + decom.recommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress())); + decom.getMonitor().run(); + assertEquals(5, nodeManager.getNodeCount(NodeStatus.inServiceHealthy())); + + decom.setMaintenanceConfigs(3, 1); // non-default config + // putting 3 DNs into maintenance leave the cluster with 2 DN, + // it should not be allowed as maintenance.replica.minimum is 3 + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress()), 100, false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + // putting 2 DNs into maintenance leave the cluster with 2 DN, + // it should be allowed + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress()), 100, false); + assertEquals(0, error.size()); + 
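  // For reference, and consistent with the comments in these tests: setMaintenanceConfigs
  // takes (maintenance replica minimum, maintenance remaining redundancy). The first bounds
  // how many replicas of a Ratis container must stay online, so (2, 1) allows at most 3 of
  // these 5 nodes into maintenance; the second applies to EC containers, where data plus
  // redundancy nodes must remain, e.g. 3 + 1 = 4 in the EC test further down.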
assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + + decom.recommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress())); + decom.getMonitor().run(); + assertEquals(5, nodeManager.getNodeCount(NodeStatus.inServiceHealthy())); + + // forcing 4 DNs into maintenance should be allowed + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), 100, true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + } + + @Test + public void testInsufficientNodeMaintenanceThrowsExceptionForEc() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + Set idsEC = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer(new ECReplicationConfig(3, 2), "admin"); + idsEC.add(container.containerID()); + } + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + nodeManager.setContainers(dn, idsEC); + } + + decom.setMaintenanceConfigs(2, 1); // default config + // putting 2 DNs into maintenance leave the cluster with 3 DN, + // it should not be allowed as maintenance.remaining.redundancy is 1 => 3+1=4 DNs are required + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), + 100, false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + // putting 1 DN into maintenance leave the cluster with 4 DN, + // it should be allowed + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress()), 100, false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + + decom.recommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress())); + decom.getMonitor().run(); + assertEquals(5, nodeManager.getNodeCount(NodeStatus.inServiceHealthy())); + + decom.setMaintenanceConfigs(2, 2); // non-default config + // putting 1 DNs into maintenance leave the cluster with 4 DN, + // it should not be allowed as maintenance.remaining.redundancy is 2 => 3+2=5 
DNs are required + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress()), 100, false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + + decom.recommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress())); + decom.getMonitor().run(); + assertEquals(5, nodeManager.getNodeCount(NodeStatus.inServiceHealthy())); + + // forcing 2 DNs into maintenance should be allowed + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), 100, true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + } + + @Test + public void testInsufficientNodeMaintenanceThrowsExceptionForRatisAndEc() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + Set idsRatis = new HashSet<>(); + ContainerInfo containerRatis = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(containerRatis.containerID()); + Set idsEC = new HashSet<>(); + ContainerInfo containerEC = containerManager.allocateContainer(new ECReplicationConfig(3, 2), "admin"); + idsEC.add(containerEC.containerID()); + + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> { + ContainerID containerID = (ContainerID)invocation.getArguments()[0]; + if (idsEC.contains(containerID)) { + return getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0]); + } + return getMockContainer(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + (ContainerID)invocation.getArguments()[0]); + }); + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + nodeManager.setContainers(dn, idsEC); + } + + decom.setMaintenanceConfigs(2, 1); // default config + // putting 2 DNs into maintenance leave the cluster with 3 DN, + // it should not be allowed as maintenance.remaining.redundancy is 1 => 3+1=4 DNs are required + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), + 100, false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + // putting 1 DN into maintenance leave the cluster with 4 DN, + // it should be allowed + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress()), 100, false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + 
nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + + decom.recommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress())); + decom.getMonitor().run(); + assertEquals(5, nodeManager.getNodeCount(NodeStatus.inServiceHealthy())); + + decom.setMaintenanceConfigs(3, 2); // non-default config + // putting 1 DNs into maintenance leave the cluster with 4 DN, + // it should not be allowed as for EC, maintenance.remaining.redundancy is 2 => 3+2=5 DNs are required + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress()), 100, false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + + decom.recommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress())); + decom.getMonitor().run(); + assertEquals(5, nodeManager.getNodeCount(NodeStatus.inServiceHealthy())); + + // forcing 2 DNs into maintenance should be allowed + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), 100, true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + } + + @Test + public void testInsufficientNodeMaintenanceChecksNotInService() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + + List error; + List dns = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + + // put 2 nodes into maintenance successfully + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(0).getIpAddress(), dns.get(1).getIpAddress()), + 100, false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(0)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + // try to put 3 nodes into maintenance, 1 in service and 2 in ENTER_MAINTENANCE state, should be successful. 
+ error = decom.startMaintenanceNodes(Arrays.asList(dns.get(0).getIpAddress(), + dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), 100, false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(0)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + } + + @Test + public void testInsufficientNodeMaintenanceChecksForNNF() throws + NodeNotFoundException, IOException { + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + } + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 3; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + nodeManager = mock(NodeManager.class); + decom = new NodeDecommissionManager(conf, nodeManager, containerManager, + SCMContext.emptyContext(), new EventQueue(), null); + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + when(nodeManager.getNodesByAddress(any())).thenAnswer(invocation -> + getDatanodeDetailsList((String)invocation.getArguments()[0], dns)); + when(nodeManager.getContainers(any())).thenReturn(idsRatis); + when(nodeManager.getNodeCount(any())).thenReturn(5); + when(nodeManager.getNodeStatus(any())).thenAnswer(invocation -> + getNodeOpState((DatanodeDetails) invocation.getArguments()[0], dns)); + Mockito.doAnswer(invocation -> { + setNodeOpState((DatanodeDetails)invocation.getArguments()[0], + (HddsProtos.NodeOperationalState)invocation.getArguments()[1], dns); + return null; + }).when(nodeManager).setNodeOperationalState(any(DatanodeDetails.class), any( + HddsProtos.NodeOperationalState.class)); + Mockito.doAnswer(invocation -> { + setNodeOpState((DatanodeDetails)invocation.getArguments()[0], + (HddsProtos.NodeOperationalState)invocation.getArguments()[1], dns); + return null; + }).when(nodeManager).setNodeOperationalState(any(DatanodeDetails.class), any( + HddsProtos.NodeOperationalState.class), any(Long.class)); + + // trying to put 4 available DNs into maintenance, + // it should not be allowed as it leaves the cluster with 1 DN and maintenance.replica.minimum is 2 + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), 100, false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + // trying to put 4 DNs (3 available + 1 not found) into maintenance, + // it should be allowed as 
effectively, it tries to move 3 DNs to maintenance, + // leaving the cluster with 2 DNs + error = decom.startMaintenanceNodes(Arrays.asList(dns.get(0).getIpAddress(), + dns.get(1).getIpAddress(), dns.get(2).getIpAddress(), dns.get(3).getIpAddress()), 100, false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + } + + private List getDatanodeDetailsList(String ipaddress, List dns) { + List datanodeDetails = new ArrayList<>(); + for (DatanodeDetails dn : dns) { + if (dn.getIpAddress().equals(ipaddress)) { + datanodeDetails.add(dn); + break; + } + } + return datanodeDetails; + } + + private void setNodeOpState(DatanodeDetails dn, HddsProtos.NodeOperationalState newState, List dns) { + for (DatanodeDetails datanode : dns) { + if (datanode.equals(dn)) { + datanode.setPersistedOpState(newState); + break; + } + } + } + + private NodeStatus getNodeOpState(DatanodeDetails dn, List dns) throws NodeNotFoundException { + if (dn.equals(dns.get(0))) { + throw new NodeNotFoundException(); + } + for (DatanodeDetails datanode : dns) { + if (datanode.equals(dn)) { + return new NodeStatus(datanode.getPersistedOpState(), HddsProtos.NodeState.HEALTHY); + } + } + return null; } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 58f65df8fd85..6a4cebe9c7a9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -151,10 +151,10 @@ public void testGetVersionTask() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); @@ -179,14 +179,14 @@ public void testGetVersionTask() throws Exception { */ @Test public void testDeletedContainersClearedOnStartup() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); + OzoneContainer ozoneContainer = createVolume(ozoneConf); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { - OzoneContainer ozoneContainer = createVolume(ozoneConf); HddsVolume hddsVolume = (HddsVolume) ozoneContainer.getVolumeSet() .getVolumesList().get(0); KeyValueContainer kvContainer = 
addContainer(ozoneConf, hddsVolume); @@ -212,17 +212,19 @@ public void testDeletedContainersClearedOnStartup() throws Exception { hddsVolume.getDeletedContainerDir().listFiles(); assertNotNull(leftoverContainers); assertEquals(0, leftoverContainers.length); + } finally { + ozoneContainer.stop(); } } @Test public void testCheckVersionResponse() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -267,7 +269,7 @@ public void testCheckVersionResponse() throws Exception { */ @Test public void testDnLayoutVersionFile() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -579,7 +581,7 @@ private StateContext heartbeatTaskHelper( // Mini Ozone cluster will not come up if the port is not true, since // Ratis will exit if the server port cannot be bound. We can remove this // hard coding once we fix the Ratis default behaviour. - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); // Create a datanode state machine for stateConext used by endpoint task try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java index 093dd93430b9..cc496a28e777 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java @@ -22,13 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.NativeCodeLoader; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; import picocli.CommandLine; /** @@ -75,12 +69,6 @@ public UserGroupInformation getUser() throws IOException { * @param argv - System Args Strings[] */ public static void main(String[] argv) { - LogManager.resetConfiguration(); - Logger.getRootLogger().setLevel(Level.INFO); - Logger.getRootLogger() - .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); - Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); - new OzoneAdmin().run(argv); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java index 158bc6da7b89..17885eecc975 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java +++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java @@ -38,49 +38,97 @@ public class ContainerBalancerStartSubcommand extends ScmSubcommand { @Option(names = {"-t", "--threshold"}, description = "Percentage deviation from average utilization of " + - "the cluster after which a datanode will be rebalanced (for " + - "example, '10' for 10%%).") + "the cluster after which a datanode will be rebalanced. The value " + + "should be in the range [0.0, 100.0), with a default of 10 " + + "(specify '10' for 10%%).") private Optional threshold; @Option(names = {"-i", "--iterations"}, - description = "Maximum consecutive iterations that" + - " balancer will run for.") + description = "Maximum consecutive iterations that " + + "balancer will run for. The value should be positive " + + "or -1, with a default of 10 (specify '10' for 10 iterations).") private Optional iterations; @Option(names = {"-d", "--max-datanodes-percentage-to-involve-per-iteration", "--maxDatanodesPercentageToInvolvePerIteration"}, description = "Max percentage of healthy, in service datanodes " + - "that can be involved in balancing in one iteration (for example, " + + "that can be involved in balancing in one iteration. The value " + + "should be in the range [0,100], with a default of 20 (specify " + "'20' for 20%%).") private Optional maxDatanodesPercentageToInvolvePerIteration; @Option(names = {"-s", "--max-size-to-move-per-iteration-in-gb", "--maxSizeToMovePerIterationInGB"}, description = "Maximum size that can be moved per iteration of " + - "balancing (for example, '500' for 500GB).") + "balancing. The value should be positive, with a default of 500 " + + "(specify '500' for 500GB).") private Optional maxSizeToMovePerIterationInGB; @Option(names = {"-e", "--max-size-entering-target-in-gb", "--maxSizeEnteringTargetInGB"}, description = "Maximum size that can enter a target datanode while " + - "balancing. This is the sum of data from multiple sources (for " + - "example, '26' for 26GB).") + "balancing. This is the sum of data from multiple sources. The value " + + "should be positive, with a default of 26 (specify '26' for 26GB).") private Optional maxSizeEnteringTargetInGB; @Option(names = {"-l", "--max-size-leaving-source-in-gb", "--maxSizeLeavingSourceInGB"}, description = "Maximum size that can leave a source datanode while " + - "balancing. This is the sum of data moving to multiple targets " + - "(for example, '26' for 26GB).") + "balancing. This is the sum of data moving to multiple targets. " + + "The value should be positive, with a default of 26 " + + "(specify '26' for 26GB).") private Optional maxSizeLeavingSourceInGB; + @Option(names = {"--balancing-iteration-interval-minutes"}, + description = "The interval period in minutes between each iteration of Container Balancer. " + + "The value should be positive, with a default of 70 (specify '70' for 70 minutes).") + private Optional balancingInterval; + + @Option(names = {"--move-timeout-minutes"}, + description = "The amount of time in minutes to allow a single container to move " + + "from source to target. The value should be positive, with a default of 65 " + + "(specify '65' for 65 minutes).") + private Optional moveTimeout; + + @Option(names = {"--move-replication-timeout-minutes"}, + description = "The " + + "amount of time in minutes to allow a single container's replication from source " + + "to target as part of container move. The value should be positive, with " + + "a default of 50. 
For example, if \"hdds.container" + + ".balancer.move.timeout\" is 65 minutes, then out of those 65 minutes " + + "50 minutes will be the deadline for replication to complete (specify " + + "'50' for 50 minutes).") + private Optional moveReplicationTimeout; + + @Option(names = {"--move-network-topology-enable"}, + description = "Whether to take network topology into account when " + + "selecting a target for a source. " + + "This configuration is false by default.") + private Optional networkTopologyEnable; + + @Option(names = {"--include-datanodes"}, + description = "A list of Datanode " + + "hostnames or ip addresses separated by commas. Only the Datanodes " + + "specified in this list are balanced. This configuration is empty by " + + "default and is applicable only if it is non-empty (specify \"hostname1,hostname2,hostname3\").") + private Optional includeNodes; + + @Option(names = {"--exclude-datanodes"}, + description = "A list of Datanode " + + "hostnames or ip addresses separated by commas. The Datanodes specified " + + "in this list are excluded from balancing. This configuration is empty " + + "by default (specify \"hostname1,hostname2,hostname3\").") + private Optional excludeNodes; + @Override public void execute(ScmClient scmClient) throws IOException { StartContainerBalancerResponseProto response = scmClient. startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); if (response.getStart()) { System.out.println("Container Balancer started successfully."); } else { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java index 89e7680f31c5..c15109a32784 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java @@ -34,7 +34,8 @@ public class ContainerBalancerStopSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { + System.out.println("Sending stop command. 
Waiting for Container Balancer to stop..."); scmClient.stopContainerBalancer(); - System.out.println("Stopping ContainerBalancer..."); + System.out.println("Container Balancer stopped."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index d07e696e7ef0..0dd52cd291ab 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -98,8 +98,8 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); boolean useRatis = conf.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); if (useRatis) { replicationFactor = HddsProtos.ReplicationFactor.THREE; replicationType = HddsProtos.ReplicationType.RATIS; @@ -184,7 +184,7 @@ public void createContainer(XceiverClientSpi client, } } - private String getEncodedContainerToken(long containerId) throws IOException { + public String getEncodedContainerToken(long containerId) throws IOException { if (!containerTokenEnabled) { return ""; } @@ -237,9 +237,9 @@ public HddsProtos.Node queryNode(UUID uuid) throws IOException { } @Override - public List decommissionNodes(List hosts) + public List decommissionNodes(List hosts, boolean force) throws IOException { - return storageContainerLocationClient.decommissionNodes(hosts); + return storageContainerLocationClient.decommissionNodes(hosts, force); } @Override @@ -250,9 +250,9 @@ public List recommissionNodes(List hosts) @Override public List startMaintenanceNodes(List hosts, - int endHours) throws IOException { + int endHours, boolean force) throws IOException { return storageContainerLocationClient.startMaintenanceNodes( - hosts, endHours); + hosts, endHours, force); } @Override @@ -483,12 +483,19 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) - throws IOException { + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { return storageContainerLocationClient.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); } @Override @@ -563,4 +570,9 @@ public DecommissionScmResponseProto decommissionScm( return storageContainerLocationClient.decommissionScm(scmId); } + @Override + public String getMetrics(String query) throws IOException { + return storageContainerLocationClient.getMetrics(query); + } + } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java index ff82b82ec87a..29f2f3d45727 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import java.io.IOException; @@ -35,12 +33,9 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStartSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStartSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { scmClient.startReplicationManager(); - LOG.info("Starting ReplicationManager..."); + System.out.println("Starting ReplicationManager..."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java index 9bc3649dd9f0..b2e308e14227 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import java.io.IOException; @@ -35,18 +33,15 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStatusSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStatusSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.getReplicationManagerStatus(); // Output data list if (execReturn) { - LOG.info("ReplicationManager is Running."); + System.out.println("ReplicationManager is Running."); } else { - LOG.info("ReplicationManager is Not Running."); + System.out.println("ReplicationManager is Not Running."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java index 7d3063a7636c..12de13c07d26 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import java.io.IOException; @@ -35,14 +33,11 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStopSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStopSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { scmClient.stopReplicationManager(); - LOG.info("Stopping ReplicationManager..."); - 
LOG.info("Requested SCM to stop ReplicationManager, " + + System.out.println("Stopping ReplicationManager..."); + System.out.println("Requested SCM to stop ReplicationManager, " + "it might take sometime for the ReplicationManager to stop."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java index db2f02c5e125..747215dcac71 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java @@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; @@ -39,9 +37,6 @@ versionProvider = HddsVersionProvider.class) public class SafeModeCheckSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeCheckSubcommand.class); - @CommandLine.Option(names = {"--verbose"}, description = "Show detailed status of rules.") private boolean verbose; @@ -52,17 +47,17 @@ public void execute(ScmClient scmClient) throws IOException { // Output data list if (execReturn) { - LOG.info("SCM is in safe mode."); + System.out.println("SCM is in safe mode."); if (verbose) { for (Map.Entry> entry : scmClient.getSafeModeRuleStatuses().entrySet()) { Pair value = entry.getValue(); - LOG.info("validated:{}, {}, {}", + System.out.printf("validated:%s, %s, %s%n", value.getLeft(), entry.getKey(), value.getRight()); } } } else { - LOG.info("SCM is out of safe mode."); + System.out.println("SCM is out of safe mode."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java index bcf64deb85e2..e4173c9767e3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java @@ -22,8 +22,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; /** @@ -36,14 +34,11 @@ versionProvider = HddsVersionProvider.class) public class SafeModeExitSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeExitSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.forceExitSafeMode(); if (execReturn) { - LOG.info("SCM exit safe mode successfully."); + System.out.println("SCM exit safe mode successfully."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java index abaca08cfbb9..ad94d4fffd0d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java @@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import 
org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; import picocli.CommandLine.Mixin; @@ -39,9 +37,6 @@ versionProvider = HddsVersionProvider.class) public class SafeModeWaitSubcommand implements Callable { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeWaitSubcommand.class); - @Option(description = "Define timeout (in second) to wait until (exit code 1) " + "or until safemode is ended (exit code 0).", defaultValue = "30", @@ -62,26 +57,26 @@ public Void call() throws Exception { long remainingTime; do { if (!scmClient.inSafeMode()) { - LOG.info("SCM is out of safe mode."); + System.out.println("SCM is out of safe mode."); return null; } remainingTime = getRemainingTimeInSec(); if (remainingTime > 0) { - LOG.info( + System.out.printf( "SCM is in safe mode. Will retry in 1 sec. Remaining time " - + "(sec): {}", + + "(sec): %s%n", remainingTime); Thread.sleep(1000); } else { - LOG.info("SCM is in safe mode. No more retries."); + System.out.println("SCM is in safe mode. No more retries."); } } while (remainingTime > 0); } catch (InterruptedException ex) { - LOG.info( - "SCM is not available (yet?). Error is {}. Will retry in 1 sec. " - + "Remaining time (sec): {}", + System.out.printf( + "SCM is not available (yet?). Error is %s. Will retry in 1 sec. " + + "Remaining time (sec): %s%n", ex.getMessage(), getRemainingTimeInSec()); Thread.sleep(1000); Thread.currentThread().interrupt(); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java index cab7a29a4ea6..09caf8147ad4 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import java.io.IOException; @@ -36,13 +34,10 @@ versionProvider = HddsVersionProvider.class) public class CleanExpiredCertsSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(CleanExpiredCertsSubcommand.class); - @Override protected void execute(SCMSecurityProtocol client) throws IOException { List pemEncodedCerts = client.removeExpiredCertificates(); - LOG.info("List of removed expired certificates:"); - printCertList(LOG, pemEncodedCerts); + System.out.println("List of removed expired certificates:"); + printCertList(pemEncodedCerts); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java index 6177c8f7ff4e..c708d424d9c9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java @@ -26,12 +26,8 @@ import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Parameters; -import picocli.CommandLine.Spec; /** 
* This is the handler that process certificate info command. @@ -44,12 +40,6 @@ class InfoSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(InfoSubcommand.class); - - @Spec - private CommandSpec spec; - @Parameters(description = "Serial id of the certificate in decimal.") private String serialId; @@ -61,12 +51,12 @@ public void execute(SCMSecurityProtocol client) throws IOException { "Certificate can't be found"); // Print container report info. - LOG.info("Certificate id: {}", serialId); + System.out.printf("Certificate id: %s%n", serialId); try { X509Certificate cert = CertificateCodec.getX509Certificate(certPemStr); - LOG.info(cert.toString()); + System.out.println(cert); } catch (CertificateException ex) { - LOG.error("Failed to get certificate id " + serialId); + System.err.println("Failed to get certificate id " + serialId); throw new IOException("Fail to get certificate id " + serialId, ex); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java index c2e0bd7fadff..ea0898381478 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java @@ -36,8 +36,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.server.JsonUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -54,9 +52,6 @@ versionProvider = HddsVersionProvider.class) public class ListSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ListSubcommand.class); - @Option(names = {"-s", "--start"}, description = "Certificate serial id to start the iteration", defaultValue = "0", showDefaultValue = Visibility.ALWAYS) @@ -114,7 +109,7 @@ protected void execute(SCMSecurityProtocol client) throws IOException { CertificateCodec.getX509Certificate(certPemStr); certList.add(new Certificate(cert)); } catch (CertificateException ex) { - LOG.error("Failed to parse certificate."); + err.println("Failed to parse certificate."); } } System.out.println( @@ -122,9 +117,9 @@ protected void execute(SCMSecurityProtocol client) throws IOException { return; } - LOG.info("Certificate list:(Type={}, BatchSize={}, CertCount={})", + System.out.printf("Certificate list:(Type=%s, BatchSize=%s, CertCount=%s)%n", type.toUpperCase(), count, certPemList.size()); - printCertList(LOG, certPemList); + printCertList(certPemList); } private static class BigIntJsonSerializer extends JsonSerializer { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java index d7ebb44e0ffc..354adbb5d6ba 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.scm.cli.ScmOption; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.slf4j.Logger; 
import picocli.CommandLine; import java.io.IOException; @@ -37,29 +36,29 @@ public abstract class ScmCertSubcommand implements Callable { @CommandLine.Mixin private ScmOption scmOption; - private static final String OUTPUT_FORMAT = "%-17s %-30s %-30s %-110s %-110s"; + private static final String OUTPUT_FORMAT = "%-17s %-30s %-30s %-110s %-110s%n"; - protected void printCertList(Logger log, List pemEncodedCerts) { + protected void printCertList(List pemEncodedCerts) { if (pemEncodedCerts.isEmpty()) { - log.info("No certificates to list"); + System.out.println("No certificates to list"); return; } - log.info(String.format(OUTPUT_FORMAT, "SerialNumber", "Valid From", - "Expiry", "Subject", "Issuer")); + System.out.printf(OUTPUT_FORMAT, "SerialNumber", "Valid From", + "Expiry", "Subject", "Issuer"); for (String certPemStr : pemEncodedCerts) { try { X509Certificate cert = CertificateCodec.getX509Certificate(certPemStr); - printCert(cert, log); + printCert(cert); } catch (CertificateException e) { - log.error("Failed to parse certificate.", e); + System.err.println("Failed to parse certificate: " + e.getMessage()); } } } - protected void printCert(X509Certificate cert, Logger log) { - log.info(String.format(OUTPUT_FORMAT, cert.getSerialNumber(), + protected void printCert(X509Certificate cert) { + System.out.printf(OUTPUT_FORMAT, cert.getSerialNumber(), cert.getNotBefore(), cert.getNotAfter(), cert.getSubjectDN(), - cert.getIssuerDN())); + cert.getIssuerDN()); } protected abstract void execute(SCMSecurityProtocol client) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java index 9eedbf858958..313dc64c9fc9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java @@ -25,8 +25,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -40,9 +38,6 @@ versionProvider = HddsVersionProvider.class) public class CreateSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(CreateSubcommand.class); - @Option(description = "Owner of the new container", defaultValue = "OZONE", names = { "-o", "--owner"}) private String owner; @@ -50,7 +45,7 @@ public class CreateSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { ContainerWithPipeline container = scmClient.createContainer(owner); - LOG.info("Container {} is created.", + System.out.printf("Container %s is created.%n", container.getContainerInfo().getContainerID()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java index 8ed9f520b29d..0e67661bba1d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java @@ -45,8 +45,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.server.JsonUtils; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; @@ -63,9 +61,6 @@ versionProvider = HddsVersionProvider.class) public class InfoSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(InfoSubcommand.class); - @Spec private CommandSpec spec; @@ -126,13 +121,13 @@ private void printOutput(ScmClient scmClient, String id, boolean first) private void printHeader() { if (json && multiContainer) { - LOG.info("["); + System.out.println("["); } } private void printFooter() { if (json && multiContainer) { - LOG.info("]"); + System.out.println("]"); } } @@ -142,9 +137,9 @@ private void printError(String error) { private void printBreak() { if (json) { - LOG.info(","); + System.out.println(","); } else { - LOG.info(""); + System.out.println(""); } } @@ -175,47 +170,47 @@ private void printDetails(ScmClient scmClient, long containerID, new ContainerWithPipelineAndReplicas(container.getContainerInfo(), container.getPipeline(), replicas, container.getContainerInfo().getPipelineID()); - LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); } else { ContainerWithoutDatanodes wrapper = new ContainerWithoutDatanodes(container.getContainerInfo(), container.getPipeline(), replicas, container.getContainerInfo().getPipelineID()); - LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); } } else { // Print container report info. - LOG.info("Container id: {}", containerID); + System.out.printf("Container id: %s%n", containerID); boolean verbose = spec != null && spec.root().userObject() instanceof GenericParentCommand && ((GenericParentCommand) spec.root().userObject()).isVerbose(); if (verbose) { - LOG.info("Pipeline Info: {}", container.getPipeline()); + System.out.printf("Pipeline Info: %s%n", container.getPipeline()); } else { - LOG.info("Pipeline id: {}", container.getPipeline().getId().getId()); + System.out.printf("Pipeline id: %s%n", container.getPipeline().getId().getId()); } - LOG.info("Write PipelineId: {}", + System.out.printf("Write PipelineId: %s%n", container.getContainerInfo().getPipelineID().getId()); try { String pipelineState = scmClient.getPipeline( container.getContainerInfo().getPipelineID().getProtobuf()) .getPipelineState().toString(); - LOG.info("Write Pipeline State: {}", pipelineState); + System.out.printf("Write Pipeline State: %s%n", pipelineState); } catch (IOException ioe) { if (SCMHAUtils.unwrapException( ioe) instanceof PipelineNotFoundException) { - LOG.info("Write Pipeline State: CLOSED"); + System.out.println("Write Pipeline State: CLOSED"); } else { printError("Failed to retrieve pipeline info"); } } - LOG.info("Container State: {}", container.getContainerInfo().getState()); + System.out.printf("Container State: %s%n", container.getContainerInfo().getState()); // Print pipeline of an existing container. 
String machinesStr = container.getPipeline().getNodes().stream().map( InfoSubcommand::buildDatanodeDetails) .collect(Collectors.joining(",\n")); - LOG.info("Datanodes: [{}]", machinesStr); + System.out.printf("Datanodes: [%s]%n", machinesStr); // Print the replica details if available if (replicas != null) { @@ -223,7 +218,7 @@ private void printDetails(ScmClient scmClient, long containerID, .sorted(Comparator.comparing(ContainerReplicaInfo::getReplicaIndex)) .map(InfoSubcommand::buildReplicaDetails) .collect(Collectors.joining(",\n")); - LOG.info("Replicas: [{}]", replicaStr); + System.out.printf("Replicas: [%s]%n", replicaStr); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index b120fe4169da..ecc43d04087a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -36,8 +36,6 @@ import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -52,9 +50,6 @@ versionProvider = HddsVersionProvider.class) public class ListSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ListSubcommand.class); - @Option(names = {"-s", "--start"}, description = "Container id to start the iteration") private long startId; @@ -94,7 +89,7 @@ public class ListSubcommand extends ScmSubcommand { private void outputContainerInfo(ContainerInfo containerInfo) throws IOException { // Print container report info. 
- LOG.info("{}", WRITER.writeValueAsString(containerInfo)); + System.out.println(WRITER.writeValueAsString(containerInfo)); } @Override diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index b53632f8eec5..18ddbd086d7a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -17,16 +17,21 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import com.fasterxml.jackson.databind.JsonNode; import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.client.DecommissionUtils; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -55,43 +60,115 @@ public class DecommissionStatusSubCommand extends ScmSubcommand { defaultValue = "") private String ipAddress; + @CommandLine.Option(names = { "--json" }, + description = "Show output in json format", + defaultValue = "false") + private boolean json; + @Override public void execute(ScmClient scmClient) throws IOException { - List decommissioningNodes; Stream allNodes = scmClient.queryNode(DECOMMISSIONING, null, HddsProtos.QueryScope.CLUSTER, "").stream(); + List decommissioningNodes = + DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress); if (!Strings.isNullOrEmpty(uuid)) { - decommissioningNodes = allNodes.filter(p -> p.getNodeID().getUuid() - .equals(uuid)).collect(Collectors.toList()); if (decommissioningNodes.isEmpty()) { System.err.println("Datanode: " + uuid + " is not in DECOMMISSIONING"); return; } } else if (!Strings.isNullOrEmpty(ipAddress)) { - decommissioningNodes = allNodes.filter(p -> p.getNodeID().getIpAddress() - .compareToIgnoreCase(ipAddress) == 0).collect(Collectors.toList()); if (decommissioningNodes.isEmpty()) { System.err.println("Datanode: " + ipAddress + " is not in " + "DECOMMISSIONING"); return; } } else { - decommissioningNodes = allNodes.collect(Collectors.toList()); - System.out.println("\nDecommission Status: DECOMMISSIONING - " + - decommissioningNodes.size() + " node(s)"); + if (!json) { + System.out.println("\nDecommission Status: DECOMMISSIONING - " + + decommissioningNodes.size() + " node(s)"); + } + } + + String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + int numDecomNodes = -1; + JsonNode jsonNode = null; + if (metricsJson != null) { + jsonNode = DecommissionUtils.getBeansJsonNode(metricsJson); + numDecomNodes = DecommissionUtils.getNumDecomNodes(jsonNode); + } + + if (json) { + List> decommissioningNodesDetails = new ArrayList<>(); + + for (HddsProtos.Node node : decommissioningNodes) { + DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( + node.getNodeID()); + Map datanodeMap = new LinkedHashMap<>(); + datanodeMap.put("datanodeDetails", 
datanode); + datanodeMap.put("metrics", getCounts(datanode, jsonNode, numDecomNodes)); + datanodeMap.put("containers", getContainers(scmClient, datanode)); + decommissioningNodesDetails.add(datanodeMap); + } + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(decommissioningNodesDetails)); + return; } for (HddsProtos.Node node : decommissioningNodes) { DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( node.getNodeID()); printDetails(datanode); + printCounts(datanode, jsonNode, numDecomNodes); Map> containers = scmClient.getContainersOnDecomNode(datanode); System.out.println(containers); } } + + private String errorMessage = "Error getting pipeline and container metrics for "; + + public String getErrorMessage() { + return errorMessage; + } + + public void setErrorMessage(String errorMessage) { + this.errorMessage = errorMessage; + } + private void printDetails(DatanodeDetails datanode) { System.out.println("\nDatanode: " + datanode.getUuid().toString() + " (" + datanode.getNetworkLocation() + "/" + datanode.getIpAddress() + "/" + datanode.getHostName() + ")"); } + + private void printCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) { + Map countsMap = getCounts(datanode, counts, numDecomNodes); + System.out.println("Decommission Started At : " + countsMap.get("decommissionStartTime")); + System.out.println("No. of Unclosed Pipelines: " + countsMap.get("numOfUnclosedPipelines")); + System.out.println("No. of UnderReplicated Containers: " + countsMap.get("numOfUnderReplicatedContainers")); + System.out.println("No. of Unclosed Containers: " + countsMap.get("numOfUnclosedContainers")); + } + + private Map getCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) { + Map countsMap = new LinkedHashMap<>(); + String errMsg = getErrorMessage() + datanode.getHostName(); + try { + countsMap = DecommissionUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg); + if (countsMap != null) { + return countsMap; + } + System.err.println(errMsg); + } catch (IOException e) { + System.err.println(errMsg); + } + return countsMap; + } + + private Map getContainers(ScmClient scmClient, DatanodeDetails datanode) throws IOException { + Map> containers = scmClient.getContainersOnDecomNode(datanode); + return containers.entrySet().stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue().stream(). + map(ContainerID::toString). 
+ collect(Collectors.toList()))); + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index e7d3a4443831..31123ae81b51 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -48,6 +48,11 @@ public class DecommissionSubCommand extends ScmSubcommand { paramLabel = "") private List parameters = new ArrayList<>(); + @CommandLine.Option(names = { "--force" }, + defaultValue = "false", + description = "Forcefully try to decommission the datanode(s)") + private boolean force; + @Override public void execute(ScmClient scmClient) throws IOException { if (parameters.size() > 0) { @@ -62,7 +67,7 @@ public void execute(ScmClient scmClient) throws IOException { } else { hosts = parameters; } - List errors = scmClient.decommissionNodes(hosts); + List errors = scmClient.decommissionNodes(hosts, force); System.out.println("Started decommissioning datanode(s):\n" + String.join("\n", hosts)); if (errors.size() > 0) { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java index 82d263b416fb..b07af660a8f2 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java @@ -53,6 +53,12 @@ public class MaintenanceSubCommand extends ScmSubcommand { "By default, maintenance must be ended manually.") private int endInHours = 0; + @CommandLine.Option(names = { "--force" }, + defaultValue = "false", + description = "Forcefully try to decommission the datanode(s)") + private boolean force; + + @Override public void execute(ScmClient scmClient) throws IOException { if (parameters.size() > 0) { @@ -68,7 +74,7 @@ public void execute(ScmClient scmClient) throws IOException { hosts = parameters; } List errors = - scmClient.startMaintenanceNodes(hosts, endInHours); + scmClient.startMaintenanceNodes(hosts, endInHours, force); System.out.println("Entering maintenance mode on datanode(s):\n" + String.join("\n", hosts)); if (errors.size() > 0) { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java index 78b83e56db07..7c70456995b4 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java @@ -18,13 +18,19 @@ package org.apache.hadoop.hdds.scm.cli.pipeline; +import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import picocli.CommandLine; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.function.Predicate; /** * Handler of close pipeline command. 
@@ -35,13 +41,49 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) public class ClosePipelineSubcommand extends ScmSubcommand { + @CommandLine.ArgGroup(multiplicity = "1") + private CloseOptionGroup closeOption; - @CommandLine.Parameters(description = "ID of the pipeline to close") - private String pipelineId; + @CommandLine.Mixin + private final FilterPipelineOptions filterOptions = new FilterPipelineOptions(); @Override public void execute(ScmClient scmClient) throws IOException { - scmClient.closePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); + if (!Strings.isNullOrEmpty(closeOption.pipelineId)) { + if (filterOptions.getReplicationFilter().isPresent()) { + throw new IllegalArgumentException("Replication filters can only be used with --all"); + } + scmClient.closePipeline(HddsProtos.PipelineID.newBuilder().setId(closeOption.pipelineId).build()); + } else if (closeOption.closeAll) { + Optional> replicationFilter = filterOptions.getReplicationFilter(); + + List pipelineList = new ArrayList<>(); + Predicate predicate = replicationFilter.orElse(null); + for (Pipeline pipeline : scmClient.listPipelines()) { + boolean filterPassed = (predicate != null) && predicate.test(pipeline); + if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { + pipelineList.add(pipeline); + } + } + System.out.println("Sending close command for " + pipelineList.size() + " pipelines..."); + pipelineList.forEach(pipeline -> { + try { + scmClient.closePipeline( + HddsProtos.PipelineID.newBuilder().setId(pipeline.getId().getId().toString()).build()); + } catch (IOException e) { + System.err.println("Error closing pipeline: " + pipeline.getId() + ", cause: " + e.getMessage()); + } + }); + } + } + + private static class CloseOptionGroup { + @CommandLine.Parameters(description = "ID of the pipeline to close") + private String pipelineId; + + @CommandLine.Option( + names = {"--all"}, + description = "Close all pipelines") + private boolean closeAll; } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java index 86299eec2d05..209acd32230e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java @@ -39,15 +39,16 @@ public class CreatePipelineSubcommand extends ScmSubcommand { @CommandLine.Option( names = {"-t", "--replication-type", "--replicationType"}, - description = "Replication type (STAND_ALONE, RATIS). Full name" + + description = "Replication type is RATIS. Full name" + " --replicationType will be removed in later versions.", - defaultValue = "STAND_ALONE" + defaultValue = "RATIS", + hidden = true ) private HddsProtos.ReplicationType type; @CommandLine.Option( names = {"-f", "--replication-factor", "--replicationFactor"}, - description = "Replication factor (ONE, THREE). Full name" + + description = "Replication factor for RATIS (ONE, THREE). Full name" + " --replicationFactor will be removed in later versions.", defaultValue = "ONE" ) @@ -62,7 +63,8 @@ public void execute(ScmClient scmClient) throws IOException { // As I see there is no way to specify ECReplicationConfig properly here // so failing the request if type is EC, seems to be safe. 
if (type == HddsProtos.ReplicationType.CHAINED - || type == HddsProtos.ReplicationType.EC) { + || type == HddsProtos.ReplicationType.EC + || type == HddsProtos.ReplicationType.STAND_ALONE) { throw new IllegalArgumentException(type.name() + " is not supported yet."); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/FilterPipelineOptions.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/FilterPipelineOptions.java new file mode 100644 index 000000000000..afb61c1dd62c --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/FilterPipelineOptions.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.pipeline; + +import com.google.common.base.Strings; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import picocli.CommandLine; + +import java.util.Optional; +import java.util.function.Predicate; + +/** + * Defines command-line option for filtering pipelines. + */ +public class FilterPipelineOptions { + @CommandLine.Option( + names = {"-t", "--type"}, + description = "Filter pipelines by replication type, RATIS or EC", + defaultValue = "") + private String replicationType; + + @CommandLine.Option( + names = {"-r", "--replication"}, + description = "Filter pipelines by replication, eg ONE, THREE or for EC rs-3-2-1024k", + defaultValue = "") + private String replication; + + @CommandLine.Option( + names = {"-ffc", "--filterByFactor", "--filter-by-factor"}, + description = "[deprecated] Filter pipelines by factor (e.g. ONE, THREE) (implies RATIS replication type)") + private ReplicationFactor factor; + + Optional> getReplicationFilter() { + boolean hasReplication = !Strings.isNullOrEmpty(replication); + boolean hasFactor = factor != null; + boolean hasReplicationType = !Strings.isNullOrEmpty(replicationType); + + if (hasFactor) { + if (hasReplication) { + throw new IllegalArgumentException("Factor and replication are mutually exclusive"); + } + ReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(factor.toProto()); + return Optional.of(p -> replicationConfig.equals(p.getReplicationConfig())); + } + + if (hasReplication) { + if (!hasReplicationType) { + throw new IllegalArgumentException("Replication type is required if replication is set"); + } + + ReplicationConfig replicationConfig = + ReplicationConfig.parse(ReplicationType.valueOf(replicationType), replication, new OzoneConfiguration()); + return Optional.of(p -> replicationConfig.equals(p.getReplicationConfig())); + } + + if (hasReplicationType) { + return Optional.of(p -> p.getReplicationConfig() + .getReplicationType() + .toString() + .compareToIgnoreCase(replicationType) == 0); + } + + return Optional.empty(); + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java index f08d316500f2..9f88b7375683 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java @@ -20,11 +20,6 @@ import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -47,24 +42,8 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) public class ListPipelinesSubcommand extends ScmSubcommand { - - @CommandLine.Option(names = {"-t", "--type"}, - description = "Filter listed pipelines by replication type, RATIS or EC", - defaultValue = "") - private String replicationType; - - @CommandLine.Option( - names = {"-r", "--replication"}, - description = "Filter listed pipelines by replication, eg ONE, THREE or " - + "for EC rs-3-2-1024k", - defaultValue = "") - private String replication; - - @CommandLine.Option( - names = {"-ffc", "--filterByFactor", "--filter-by-factor"}, - description = "[deprecated] Filter pipelines by factor (e.g. ONE, THREE) " - + " (implies RATIS replication type)") - private ReplicationFactor factor; + @CommandLine.Mixin + private final FilterPipelineOptions filterOptions = new FilterPipelineOptions(); @CommandLine.Option( names = {"-s", "--state", "-fst", "--filterByState", "--filter-by-state"}, @@ -72,15 +51,15 @@ public class ListPipelinesSubcommand extends ScmSubcommand { defaultValue = "") private String state; - @CommandLine.Option(names = { "--json" }, - defaultValue = "false", - description = "Format output as JSON") - private boolean json; + @CommandLine.Option( + names = {"--json"}, + defaultValue = "false", + description = "Format output as JSON") + private boolean json; @Override public void execute(ScmClient scmClient) throws IOException { - Optional> replicationFilter = - getReplicationFilter(); + Optional> replicationFilter = filterOptions.getReplicationFilter(); Stream stream = scmClient.listPipelines().stream(); if (replicationFilter.isPresent()) { @@ -99,44 +78,4 @@ public void execute(ScmClient scmClient) throws IOException { stream.forEach(System.out::println); } } - - private Optional> getReplicationFilter() { - boolean hasReplication = !Strings.isNullOrEmpty(replication); - boolean hasFactor = factor != null; - boolean hasReplicationType = !Strings.isNullOrEmpty(replicationType); - - if (hasFactor) { - if (hasReplication) { - throw new IllegalArgumentException( - "Factor and replication are mutually exclusive"); - } - - ReplicationConfig replicationConfig = - RatisReplicationConfig.getInstance(factor.toProto()); - return Optional.of( - p -> replicationConfig.equals(p.getReplicationConfig())); - } - - if (hasReplication) { - if (!hasReplicationType) { - throw new IllegalArgumentException( - "Replication type is required if replication is set"); - } - - ReplicationConfig replicationConfig = - ReplicationConfig.parse(ReplicationType.valueOf(replicationType), - replication, new OzoneConfiguration()); - return Optional.of( - p -> replicationConfig.equals(p.getReplicationConfig())); - } - - if (hasReplicationType) { - return Optional.of(p -> p.getReplicationConfig() - .getReplicationType() - .toString() - .compareToIgnoreCase(replicationType) == 0); - } - - return Optional.empty(); - } } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java index d8c1addb78e0..efc11d550f55 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java +++ 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java @@ -28,9 +28,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -44,6 +41,7 @@ import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.UUID; import java.util.regex.Matcher; @@ -52,6 +50,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -69,8 +68,6 @@ public class TestInfoSubCommand { private ScmClient scmClient; private InfoSubcommand cmd; private List datanodes; - private Logger logger; - private TestAppender appender; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); @@ -88,18 +85,12 @@ public void setup() throws IOException { when(scmClient.getContainerWithPipeline(anyLong())).then(i -> getContainerWithPipeline(i.getArgument(0))); when(scmClient.getPipeline(any())).thenThrow(new PipelineNotFoundException("Pipeline not found.")); - appender = new TestAppender(); - logger = Logger.getLogger( - org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand.class); - logger.addAppender(appender); - System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @AfterEach public void after() { - logger.removeAppender(appender); System.setOut(originalOut); System.setErr(originalErr); System.setIn(originalIn); @@ -150,10 +141,8 @@ public void testContainersCanBeReadFromStdin() throws IOException { private void validateMultiOutput() throws UnsupportedEncodingException { // Ensure we have a log line for each containerID - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage() - .matches("(?s)^Container id: (1|123|456|789).*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^Container id: (1|123|456|789).*")) .collect(Collectors.toList()); assertEquals(4, replica.size()); @@ -191,10 +180,8 @@ public void testMultipleContainersCanBePassedJson() throws Exception { private void validateJsonMultiOutput() throws UnsupportedEncodingException { // Ensure we have a log line for each containerID - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage() - .matches("(?s)^.*\"containerInfo\".*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^.*\"containerInfo\".*")) .collect(Collectors.toList()); assertEquals(4, replica.size()); @@ -213,34 +200,33 @@ private void testReplicaIncludedInOutput(boolean includeIndex) cmd.execute(scmClient); // Ensure we have a line for 
Replicas: - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*")) - .collect(Collectors.toList()); - assertEquals(1, replica.size()); + String output = outContent.toString(DEFAULT_ENCODING); + Pattern pattern = Pattern.compile("Replicas: \\[.*\\]", Pattern.DOTALL); + Matcher matcher = pattern.matcher(output); + assertTrue(matcher.find()); + String replica = matcher.group(); // Ensure each DN UUID is mentioned in the message: for (DatanodeDetails dn : datanodes) { - Pattern pattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*", + Pattern uuidPattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*", Pattern.DOTALL); - Matcher matcher = pattern.matcher(replica.get(0).getRenderedMessage()); - assertTrue(matcher.matches()); + assertThat(replica).matches(uuidPattern); } // Ensure the replicaIndex output is in order if (includeIndex) { List indexList = new ArrayList<>(); for (int i = 1; i < datanodes.size() + 1; i++) { String temp = "ReplicaIndex: " + i; - indexList.add(replica.get(0).getRenderedMessage().indexOf(temp)); + indexList.add(replica.indexOf(temp)); } assertEquals(datanodes.size(), indexList.size()); assertTrue(inSort(indexList)); } // Ensure ReplicaIndex is not mentioned as it was not passed in the proto: - Pattern pattern = Pattern.compile(".*ReplicaIndex.*", - Pattern.DOTALL); - Matcher matcher = pattern.matcher(replica.get(0).getRenderedMessage()); - assertEquals(includeIndex, matcher.matches()); + assertEquals(includeIndex, + Pattern.compile(".*ReplicaIndex.*", Pattern.DOTALL) + .matcher(replica) + .matches()); } @Test @@ -253,9 +239,8 @@ public void testReplicasNotOutputIfError() throws IOException { cmd.execute(scmClient); // Ensure we have no lines for Replicas: - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^Replicas:.*")) .collect(Collectors.toList()); assertEquals(0, replica.size()); @@ -274,9 +259,7 @@ public void testReplicasNotOutputIfErrorWithJson() throws IOException { c.parseArgs("1", "--json"); cmd.execute(scmClient); - List logs = appender.getLog(); - assertEquals(1, logs.size()); - String json = logs.get(0).getRenderedMessage(); + String json = outContent.toString(DEFAULT_ENCODING); assertFalse(json.matches("(?s).*replicas.*")); } @@ -310,11 +293,8 @@ private void testJsonOutput() throws IOException { c.parseArgs("1", "--json"); cmd.execute(scmClient); - List logs = appender.getLog(); - assertEquals(1, logs.size()); - // Ensure each DN UUID is mentioned in the message after replicas: - String json = logs.get(0).getRenderedMessage(); + String json = outContent.toString(DEFAULT_ENCODING); assertTrue(json.matches("(?s).*replicas.*")); for (DatanodeDetails dn : datanodes) { Pattern pattern = Pattern.compile( @@ -409,25 +389,4 @@ private List createDatanodeDetails(int count) { return dns; } - private static class TestAppender extends AppenderSkeleton { - private final List log = new ArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List getLog() { - return new ArrayList<>(log); - } - } } diff --git 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java index 3be931c13211..b3c15a46f76f 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java @@ -22,6 +22,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.hdds.utils.db.CodecTestUtil; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; @@ -44,6 +46,8 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -135,6 +139,16 @@ public void setup() throws Exception { chunkManager = new FilePerBlockStrategy(true, blockManager, null); } + @BeforeAll + public static void beforeClass() { + CodecBuffer.enableLeakDetection(); + } + + @AfterEach + public void after() throws Exception { + CodecTestUtil.gc(); + } + @Test public void testUpgrade() throws IOException { int num = 2; @@ -187,7 +201,7 @@ private Map putAnyBlockData(KeyValueContainerData data, private void putChunksInBlock(int numOfChunksPerBlock, int i, List chunks, KeyValueContainer container, BlockID blockID) { - long chunkLength = 100; + final long chunkLength = 100; try { for (int k = 0; k < numOfChunksPerBlock; k++) { final String chunkName = String.format("%d_chunk_%d_block_%d", @@ -199,11 +213,10 @@ private void putChunksInBlock(int numOfChunksPerBlock, int i, .setChecksumData(Checksum.getNoChecksumDataProto()).build(); chunks.add(info); ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength); - final ChunkBuffer chunkData = ChunkBuffer.allocate((int) chunkLength); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); + try (ChunkBuffer chunkData = ChunkBuffer.allocate((int) chunkLength)) { + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); + } } } catch (IOException ex) { LOG.warn("Putting chunks in blocks was not successful for BlockID: " diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java index e271cdfe0298..27c360e72743 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java @@ -104,7 +104,10 @@ public void testContainerBalancerStopSubcommand() throws IOException { ScmClient scmClient = 
mock(ScmClient.class); stopCmd.execute(scmClient); - Pattern p = Pattern.compile("^Stopping\\sContainerBalancer..."); + Pattern p = Pattern.compile("^Sending\\sstop\\scommand." + + "\\sWaiting\\sfor\\sContainer\\sBalancer\\sto\\sstop...\\n" + + "Container\\sBalancer\\sstopped."); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } @@ -114,7 +117,7 @@ public void testContainerBalancerStartSubcommandWhenBalancerIsNotRunning() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.startContainerBalancer( - null, null, null, null, null, null)) + null, null, null, null, null, null, null, null, null, null, null, null)) .thenReturn( StorageContainerLocationProtocolProtos .StartContainerBalancerResponseProto.newBuilder() @@ -133,7 +136,7 @@ public void testContainerBalancerStartSubcommandWhenBalancerIsRunning() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.startContainerBalancer( - null, null, null, null, null, null)) + null, null, null, null, null, null, null, null, null, null, null, null)) .thenReturn(StorageContainerLocationProtocolProtos .StartContainerBalancerResponseProto.newBuilder() .setStart(false) diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index 41c31caf1f0a..fce593ab8c35 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -60,6 +60,7 @@ public class TestDecommissionStatusSubCommand { private DecommissionStatusSubCommand cmd; private List nodes = getNodeDetails(2); private Map> containerOnDecom = getContainersOnDecomNodes(); + private ArrayList metrics = getMetrics(); @BeforeEach public void setup() throws UnsupportedEncodingException { @@ -80,6 +81,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -91,15 +93,17 @@ public void testSuccessWhenDecommissionStatus() throws IOException { p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("Datanode:\\s.*host1\\)"); + p = Pattern.compile("No\\. 
of Unclosed Pipelines:"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + assertTrue(m.find()); // metrics for both are shown + p = Pattern.compile("UnderReplicated=.* UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + assertTrue(m.find()); // container lists for both are shown } @Test @@ -109,6 +113,7 @@ public void testNoNodesWhenDecommissionStatus() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenReturn(new ArrayList<>()); when(scmClient.getContainersOnDecomNode(any())).thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(0)); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -117,10 +122,10 @@ public void testNoNodesWhenDecommissionStatus() throws IOException { assertTrue(m.find()); // no host details are shown - p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -131,24 +136,22 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(0).getNodeID().getUuid()); cmd.execute(scmClient); // check status of host0 - Pattern p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + Pattern p = Pattern.compile("Datanode:\\s.*host0\\)"); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); - m = p.matcher(outContent.toString(DEFAULT_ENCODING)); - assertTrue(m.find()); - // as uuid of only host0 is passed, host1 should NOT be displayed - p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + p = Pattern.compile("UnderReplicated=.*UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); assertFalse(m.find()); } @@ -161,6 +164,7 @@ public void testIdOptionDecommissionStatusFail() throws IOException { .thenReturn(containerOnDecom); when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) .thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(2)); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(1).getNodeID().getUuid()); @@ -172,10 +176,10 @@ public void testIdOptionDecommissionStatusFail() throws IOException { assertTrue(m.find()); // no host details are shown - p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = 
Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -186,24 +190,22 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); cmd.execute(scmClient); // check status of host1 - Pattern p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + Pattern p = Pattern.compile("Datanode:\\s.*host1\\)"); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); - m = p.matcher(outContent.toString(DEFAULT_ENCODING)); - assertTrue(m.find()); - // as IpAddress of only host1 is passed, host0 should NOT be displayed - p = Pattern.compile("Datanode:\\s.*host0.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + p = Pattern.compile("UnderReplicated=.*UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); assertFalse(m.find()); } @@ -216,6 +218,7 @@ public void testIpOptionDecommissionStatusFail() throws IOException { .thenReturn(containerOnDecom); when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) .thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(2)); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); @@ -226,11 +229,11 @@ public void testIpOptionDecommissionStatusFail() throws IOException { Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -275,4 +278,38 @@ private Map> getContainersOnDecomNodes() { return containerMap; } + private ArrayList getMetrics() { + ArrayList result = new ArrayList<>(); + // no nodes decommissioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 0, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 0, " + + "\"ContainersUnderReplicatedTotal\" : 0, \"ContainersUnClosedTotal\" : 0, " + + "\"ContainersSufficientlyReplicatedTotal\" : 0 } ]}"); + // 2 nodes in decommisioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 2, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 2, " + + 
"\"ContainersUnderReplicatedTotal\" : 6, \"ContainersUnclosedTotal\" : 6, " + + "\"ContainersSufficientlyReplicatedTotal\" : 10, " + + "\"tag.datanode.1\" : \"host0\", \"tag.Hostname.1\" : \"host0\", " + + "\"PipelinesWaitingToCloseDN.1\" : 1, \"UnderReplicatedDN.1\" : 3, " + + "\"SufficientlyReplicatedDN.1\" : 0, \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 111211, " + + "\"tag.datanode.2\" : \"host1\", \"tag.Hostname.2\" : \"host1\", " + + "\"PipelinesWaitingToCloseDN.2\" : 1, \"UnderReplicatedDN.2\" : 3, " + + "\"SufficientlyReplicatedDN.2\" : 0, \"UnclosedContainersDN.2\" : 3, \"StartTimeDN.2\" : 221221} ]}"); + // only host 1 decommissioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 1, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 1, " + + "\"ContainersUnderReplicatedTotal\" : 3, \"ContainersUnclosedTotal\" : 3, " + + "\"ContainersSufficientlyReplicatedTotal\" : 10, " + + "\"tag.datanode.1\" : \"host0\",\n \"tag.Hostname.1\" : \"host0\",\n " + + "\"PipelinesWaitingToCloseDN.1\" : 1,\n \"UnderReplicatedDN.1\" : 3,\n " + + "\"SufficientlyReplicatedDN.1\" : 0,\n \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 221221} ]}"); + return result; + } } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java index e7e01ffaa1af..d6f0f8ae8267 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java @@ -37,6 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -71,7 +72,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; @@ -100,7 +101,7 @@ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { @Test public void testNoErrorsWhenDecommissioning() throws IOException { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); CommandLine c = new CommandLine(cmd); @@ -123,7 +124,7 @@ public void testNoErrorsWhenDecommissioning() throws IOException { @Test public void testErrorsReportedWhenDecommissioning() throws IOException { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); e.add(new DatanodeAdminError("host1", "host1 error")); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java index d2a4c54b8bf2..51f7d7182159 100644 --- 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java @@ -37,6 +37,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; @@ -72,7 +74,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; @@ -101,7 +103,7 @@ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { @Test public void testNoErrorsWhenEnteringMaintenance() throws IOException { - when(scmClient.startMaintenanceNodes(anyList(), anyInt())) + when(scmClient.startMaintenanceNodes(anyList(), anyInt(), eq(true))) .thenAnswer(invocation -> new ArrayList()); CommandLine c = new CommandLine(cmd); @@ -125,7 +127,7 @@ public void testNoErrorsWhenEnteringMaintenance() throws IOException { @Test public void testErrorsReportedWhenEnteringMaintenance() throws IOException { - when(scmClient.startMaintenanceNodes(anyList(), anyInt())) + when(scmClient.startMaintenanceNodes(anyList(), anyInt(), anyBoolean())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); e.add(new DatanodeAdminError("host1", "host1 error")); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java index e274cd4fd544..083ada8a4207 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java @@ -37,6 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -71,7 +72,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java index 687605987a68..dae9bbdd5adc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -18,12 +18,16 @@ package org.apache.hadoop.ozone.client; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import net.jcip.annotations.Immutable; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import 
org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -32,85 +36,66 @@ * This class encapsulates the arguments that are * required for creating a bucket. */ +@Immutable public final class BucketArgs { /** * ACL Information. */ - private List acls; + private final ImmutableList acls; /** * Bucket Version flag. */ - private Boolean versioning; + private final boolean versioning; /** * Type of storage to be used for this bucket. * [RAM_DISK, SSD, DISK, ARCHIVE] */ - private StorageType storageType; + private final StorageType storageType; /** * Custom key/value metadata. */ - private Map metadata; + private final Map metadata; /** * Bucket encryption key name. */ - private String bucketEncryptionKey; - private DefaultReplicationConfig defaultReplicationConfig; + private final String bucketEncryptionKey; + private final DefaultReplicationConfig defaultReplicationConfig; private final String sourceVolume; private final String sourceBucket; - private long quotaInBytes; - private long quotaInNamespace; + private final long quotaInBytes; + private final long quotaInNamespace; - private String owner; + private final String owner; /** * Bucket Layout. */ - private BucketLayout bucketLayout = BucketLayout.DEFAULT; - - /** - * Private constructor, constructed via builder. - * @param versioning Bucket version flag. - * @param storageType Storage type to be used. - * @param acls list of ACLs. - * @param metadata map of bucket metadata - * @param bucketEncryptionKey bucket encryption key name - * @param sourceVolume - * @param sourceBucket - * @param quotaInBytes Bucket quota in bytes. - * @param quotaInNamespace Bucket quota in counts. - * @param bucketLayout bucket layout. - * @param owner owner of the bucket. - * @param defaultReplicationConfig default replication config. - */ - @SuppressWarnings("parameternumber") - private BucketArgs(Boolean versioning, StorageType storageType, - List acls, Map metadata, - String bucketEncryptionKey, String sourceVolume, String sourceBucket, - long quotaInBytes, long quotaInNamespace, BucketLayout bucketLayout, - String owner, DefaultReplicationConfig defaultReplicationConfig) { - this.acls = acls; - this.versioning = versioning; - this.storageType = storageType; - this.metadata = metadata; - this.bucketEncryptionKey = bucketEncryptionKey; - this.sourceVolume = sourceVolume; - this.sourceBucket = sourceBucket; - this.quotaInBytes = quotaInBytes; - this.quotaInNamespace = quotaInNamespace; - this.bucketLayout = bucketLayout; - this.owner = owner; - this.defaultReplicationConfig = defaultReplicationConfig; + private final BucketLayout bucketLayout; + + private BucketArgs(Builder b) { + acls = b.acls == null ? ImmutableList.of() : ImmutableList.copyOf(b.acls); + versioning = b.versioning; + storageType = b.storageType; + metadata = b.metadata == null ? ImmutableMap.of() : ImmutableMap.copyOf(b.metadata); + bucketEncryptionKey = b.bucketEncryptionKey; + sourceVolume = b.sourceVolume; + sourceBucket = b.sourceBucket; + quotaInBytes = b.quotaInBytes; + quotaInNamespace = b.quotaInNamespace; + bucketLayout = b.bucketLayout; + owner = b.owner; + defaultReplicationConfig = b.defaultReplicationConfig; } /** * Returns true if bucket version is enabled, else false. 
* @return isVersionEnabled */ - public Boolean getVersioning() { + public boolean getVersioning() { return versioning; } @@ -206,7 +191,7 @@ public String getOwner() { * Builder for OmBucketInfo. */ public static class Builder { - private Boolean versioning; + private boolean versioning; private StorageType storageType; private List acls; private Map metadata; @@ -220,12 +205,11 @@ public static class Builder { private DefaultReplicationConfig defaultReplicationConfig; public Builder() { - metadata = new HashMap<>(); quotaInBytes = OzoneConsts.QUOTA_RESET; quotaInNamespace = OzoneConsts.QUOTA_RESET; } - public BucketArgs.Builder setVersioning(Boolean versionFlag) { + public BucketArgs.Builder setVersioning(boolean versionFlag) { this.versioning = versionFlag; return this; } @@ -235,13 +219,19 @@ public BucketArgs.Builder setStorageType(StorageType storage) { return this; } - public BucketArgs.Builder setAcls(List listOfAcls) { - this.acls = listOfAcls; + public BucketArgs.Builder addAcl(OzoneAcl acl) { + if (acls == null) { + acls = new ArrayList<>(); + } + acls.add(acl); return this; } public BucketArgs.Builder addMetadata(String key, String value) { - this.metadata.put(key, value); + if (metadata == null) { + metadata = new HashMap<>(); + } + metadata.put(key, value); return this; } @@ -291,9 +281,7 @@ public BucketArgs.Builder setDefaultReplicationConfig( * @return instance of BucketArgs. */ public BucketArgs build() { - return new BucketArgs(versioning, storageType, acls, metadata, - bucketEncryptionKey, sourceVolume, sourceBucket, quotaInBytes, - quotaInNamespace, bucketLayout, owner, defaultReplicationConfig); + return new BucketArgs(this); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 481bdbbd5c2a..e96d0f84a437 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -565,6 +565,21 @@ public String createSnapshot(String volumeName, return proxy.createSnapshot(volumeName, bucketName, snapshotName); } + /** + * Rename snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + public void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + proxy.renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + } + /** * Delete snapshot. 
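
Illustrative sketch, not part of the change: with the Builder-based construction above, BucketArgs is assembled through adders instead of a long positional constructor, and build() hands the Builder to the private constructor, which takes immutable copies. Method names come from this hunk; the ACL string and metadata values are invented, and OzoneAcl.parseAcl is assumed to be the usual string-parsing helper.

    // Illustrative only; values are placeholders.
    BucketArgs args = new BucketArgs.Builder()
        .setVersioning(true)
        .setStorageType(StorageType.DISK)
        .addAcl(OzoneAcl.parseAcl("user:hadoop:rw"))   // assumed parsing helper
        .addMetadata("created-by", "example")
        .build();
    // acls/metadata on the built object are ImmutableList/ImmutableMap copies,
    // so later changes to the Builder cannot leak into args.
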
* @param volumeName vol to be used diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index ca885b3b6b06..6972831477e7 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -54,7 +54,6 @@ import java.time.Instant; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -62,6 +61,7 @@ import java.util.NoSuchElementException; import java.util.stream.Collectors; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; @@ -154,7 +154,7 @@ public class OzoneBucket extends WithMetadata { private String owner; protected OzoneBucket(Builder builder) { - this.metadata = builder.metadata; + super(builder); this.proxy = builder.proxy; this.volumeName = builder.volumeName; this.name = builder.name; // bucket name @@ -430,7 +430,7 @@ public void setEncryptionKey(String bekName) throws IOException { public OzoneOutputStream createKey(String key, long size) throws IOException { return createKey(key, size, defaultReplication, - new HashMap<>()); + Collections.emptyMap()); } /** @@ -458,6 +458,7 @@ public OzoneOutputStream createKey(String key, long size, * @param key Name of the key to be created. * @param size Size of the data the key will point to. * @param replicationConfig Replication configuration. + * @param keyMetadata Custom key metadata. * @return OzoneOutputStream to which the data has to be written. * @throws IOException */ @@ -465,8 +466,27 @@ public OzoneOutputStream createKey(String key, long size, ReplicationConfig replicationConfig, Map keyMetadata) throws IOException { + return this.createKey(key, size, replicationConfig, keyMetadata, Collections.emptyMap()); + } + + /** + * Creates a new key in the bucket. + * + * @param key Name of the key to be created. + * @param size Size of the data the key will point to. + * @param replicationConfig Replication configuration. + * @param keyMetadata Custom key metadata. + * @param tags Tags used for S3 object tags + * @return OzoneOutputStream to which the data has to be written. + * @throws IOException + */ + public OzoneOutputStream createKey(String key, long size, + ReplicationConfig replicationConfig, + Map keyMetadata, + Map tags) + throws IOException { return proxy - .createKey(volumeName, name, key, size, replicationConfig, keyMetadata); + .createKey(volumeName, name, key, size, replicationConfig, keyMetadata, tags); } /** @@ -490,6 +510,7 @@ public OzoneDataStreamOutput createStreamKey(String key, long size) * @param key Name of the key to be created. * @param size Size of the data the key will point to. * @param replicationConfig Replication configuration. + * @param keyMetadata Custom key metadata. * @return OzoneDataStreamOutput to which the data has to be written. 
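
For orientation only (not in the diff): a hedged sketch of calling the new createKey overload that accepts both custom key metadata and S3 object tags. OzoneOutputStream, ReplicationConfig and RatisReplicationConfig are the existing HDDS client types; the key name, metadata entry and tag are made up.

    static void writeTaggedKey(OzoneBucket bucket, byte[] data) throws IOException {
      ReplicationConfig repl =
          RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
      Map<String, String> metadata = Collections.singletonMap("content-type", "text/plain");
      Map<String, String> tags = Collections.singletonMap("project", "demo");
      try (OzoneOutputStream out =
          bucket.createKey("example-key", data.length, repl, metadata, tags)) {
        out.write(data);
      }
    }
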
* @throws IOException */ @@ -499,8 +520,28 @@ public OzoneDataStreamOutput createStreamKey(String key, long size, if (replicationConfig == null) { replicationConfig = defaultReplication; } + return this.createStreamKey(key, size, replicationConfig, keyMetadata, + Collections.emptyMap()); + } + + /** + * Creates a new key in the bucket. + * + * @param key Name of the key to be created. + * @param size Size of the data the key will point to. + * @param replicationConfig Replication configuration. + * @param keyMetadata Custom key metadata. + * @return OzoneDataStreamOutput to which the data has to be written. + * @throws IOException + */ + public OzoneDataStreamOutput createStreamKey(String key, long size, + ReplicationConfig replicationConfig, Map keyMetadata, + Map tags) throws IOException { + if (replicationConfig == null) { + replicationConfig = defaultReplication; + } return proxy.createStreamKey(volumeName, name, key, size, - replicationConfig, keyMetadata); + replicationConfig, keyMetadata, tags); } /** @@ -658,11 +699,12 @@ public void renameKeys(Map keyMap) /** * Initiate multipart upload for a specified key. - * @param keyName - * @param type - * @param factor + * @param keyName Name of the key to be created when the multipart upload is completed. + * @param type Replication type to be used. + * @param factor Replication factor of the key. * @return OmMultipartInfo * @throws IOException + * @deprecated Use {@link OzoneBucket#initiateMultipartUpload(String, ReplicationConfig)} instead. */ @Deprecated public OmMultipartInfo initiateMultipartUpload(String keyName, @@ -675,11 +717,45 @@ public OmMultipartInfo initiateMultipartUpload(String keyName, /** * Initiate multipart upload for a specified key. + * @param keyName Name of the key to be created when the multipart upload is completed. + * @param config Replication config. + * @return OmMultipartInfo + * @throws IOException */ public OmMultipartInfo initiateMultipartUpload(String keyName, ReplicationConfig config) throws IOException { - return proxy.initiateMultipartUpload(volumeName, name, keyName, config); + return initiateMultipartUpload(keyName, config, Collections.emptyMap()); + } + + /** + * Initiate multipart upload for a specified key. + * @param keyName Name of the key to be created when the multipart upload is completed. + * @param config Replication config. + * @param metadata Custom key metadata. + * @return OmMultipartInfo + * @throws IOException + */ + public OmMultipartInfo initiateMultipartUpload(String keyName, + ReplicationConfig config, Map metadata) + throws IOException { + return initiateMultipartUpload(keyName, config, metadata, Collections.emptyMap()); + } + + /** + * Initiate multipart upload for a specified key. + * @param keyName Name of the key to be created when the multipart upload is completed. + * @param config Replication config. + * @param metadata Custom key metadata. + * @param tags Tags used for S3 object tags. + * @return OmMultipartInfo + * @throws IOException + */ + public OmMultipartInfo initiateMultipartUpload(String keyName, + ReplicationConfig config, Map metadata, + Map tags) + throws IOException { + return proxy.initiateMultipartUpload(volumeName, name, keyName, config, metadata, tags); } /** @@ -954,8 +1030,7 @@ public static Builder newBuilder(ConfigurationSource conf, /** * Inner builder for OzoneBucket. 
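
A small usage sketch (not part of the patch) of the initiateMultipartUpload overload added above, which lets a caller attach custom metadata and tags when the upload is initiated rather than when it completes. Values are placeholders; getUploadID() is the existing accessor on OmMultipartInfo.

    static String startTaggedUpload(OzoneBucket bucket) throws IOException {
      OmMultipartInfo mpu = bucket.initiateMultipartUpload("large-object",
          RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
          Collections.singletonMap("content-type", "application/octet-stream"),
          Collections.singletonMap("classification", "internal"));
      return mpu.getUploadID();   // needed later when uploading and completing parts
    }
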
*/ - public static class Builder { - private Map metadata; + public static class Builder extends WithMetadata.Builder { private ConfigurationSource conf; private ClientProtocol proxy; private String volumeName; @@ -983,8 +1058,9 @@ private Builder(ConfigurationSource conf, ClientProtocol proxy) { this.proxy = proxy; } + @Override public Builder setMetadata(Map metadata) { - this.metadata = metadata; + super.setMetadata(metadata); return this; } @@ -1253,7 +1329,7 @@ List getNextShallowListOfKeys(String prevKey) proxy.listStatusLight(volumeName, name, delimiterKeyPrefix, false, startKey, listCacheSize, false); - if (addedKeyPrefix) { + if (addedKeyPrefix && statuses.size() > 0) { // previous round already include the startKey, so remove it statuses.remove(0); } else { @@ -1276,25 +1352,35 @@ protected void initDelimiterKeyPrefix() { protected List buildKeysWithKeyPrefix( List statuses) { return statuses.stream() - .map(status -> { - BasicOmKeyInfo keyInfo = status.getKeyInfo(); - String keyName = keyInfo.getKeyName(); - if (status.isDirectory()) { - // add trailing slash to represent directory - keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - } - return new OzoneKey(keyInfo.getVolumeName(), - keyInfo.getBucketName(), keyName, - keyInfo.getDataSize(), keyInfo.getCreationTime(), - keyInfo.getModificationTime(), - keyInfo.getReplicationConfig(), keyInfo.isFile()); - }) + .map(OzoneBucket::toOzoneKey) .filter(key -> StringUtils.startsWith(key.getName(), getKeyPrefix())) .collect(Collectors.toList()); } } + private static OzoneKey toOzoneKey(OzoneFileStatusLight status) { + BasicOmKeyInfo keyInfo = status.getKeyInfo(); + String keyName = keyInfo.getKeyName(); + final Map metadata; + if (status.isDirectory()) { + // add trailing slash to represent directory + keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + metadata = Collections.emptyMap(); + } else { + metadata = Collections.singletonMap(ETAG, keyInfo.getETag()); + } + return new OzoneKey(keyInfo.getVolumeName(), + keyInfo.getBucketName(), keyName, + keyInfo.getDataSize(), keyInfo.getCreationTime(), + keyInfo.getModificationTime(), + keyInfo.getReplicationConfig(), + metadata, + keyInfo.isFile(), + keyInfo.getOwnerName(), + Collections.emptyMap()); + } + /** * An Iterator to iterate over {@link OzoneKey} list. 
@@ -1662,21 +1748,7 @@ private boolean getChildrenKeys(String keyPrefix, String startKey, for (int indx = 0; indx < statuses.size(); indx++) { OzoneFileStatusLight status = statuses.get(indx); BasicOmKeyInfo keyInfo = status.getKeyInfo(); - String keyName = keyInfo.getKeyName(); - - OzoneKey ozoneKey; - // Add dir to the dirList - if (status.isDirectory()) { - // add trailing slash to represent directory - keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - } - ozoneKey = new OzoneKey(keyInfo.getVolumeName(), - keyInfo.getBucketName(), keyName, - keyInfo.getDataSize(), keyInfo.getCreationTime(), - keyInfo.getModificationTime(), - keyInfo.getReplicationConfig(), - keyInfo.isFile()); - + OzoneKey ozoneKey = toOzoneKey(status); keysResultList.add(ozoneKey); if (status.isDirectory()) { @@ -1779,7 +1851,8 @@ private void addKeyPrefixInfoToResultList(String keyPrefix, keyInfo.getDataSize(), keyInfo.getCreationTime(), keyInfo.getModificationTime(), keyInfo.getReplicationConfig(), - keyInfo.isFile()); + keyInfo.isFile(), + keyInfo.getOwnerName()); keysResultList.add(ozoneKey); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java index fba9826df1a1..fdd89fe81990 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java @@ -44,6 +44,10 @@ public class OzoneKey { * Name of the Key. */ private final String name; + /** + * Name of the Key owner. + */ + private final String owner; /** * Size of the data. */ @@ -59,7 +63,9 @@ public class OzoneKey { private ReplicationConfig replicationConfig; - private Map metadata = new HashMap<>(); + private final Map metadata = new HashMap<>(); + + private final Map tags = new HashMap<>(); /** * Indicator if key is a file. @@ -74,7 +80,7 @@ public class OzoneKey { public OzoneKey(String volumeName, String bucketName, String keyName, long size, long creationTime, long modificationTime, ReplicationConfig replicationConfig, - boolean isFile) { + boolean isFile, String owner) { this.volumeName = volumeName; this.bucketName = bucketName; this.name = keyName; @@ -83,16 +89,19 @@ public OzoneKey(String volumeName, String bucketName, this.modificationTime = Instant.ofEpochMilli(modificationTime); this.replicationConfig = replicationConfig; this.isFile = isFile; + this.owner = owner; } @SuppressWarnings("parameternumber") public OzoneKey(String volumeName, String bucketName, String keyName, long size, long creationTime, long modificationTime, ReplicationConfig replicationConfig, - Map metadata, boolean isFile) { + Map metadata, boolean isFile, String owner, + Map tags) { this(volumeName, bucketName, keyName, size, creationTime, - modificationTime, replicationConfig, isFile); + modificationTime, replicationConfig, isFile, owner); this.metadata.putAll(metadata); + this.tags.putAll(tags); } /** @@ -122,6 +131,15 @@ public String getName() { return name; } + /** + * Returns the Owner Name. + * + * @return keyName + */ + public String getOwner() { + return owner; + } + /** * Returns the size of the data. * @@ -149,10 +167,24 @@ public Instant getModificationTime() { return modificationTime; } + /** + * Returns the metadata of the key. + * + * @return key metadata. + */ public Map getMetadata() { return metadata; } + /** + * Returns the tags of the key. + * + * @return key tags. 
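
As an aside, not part of the change: a short sketch of reading the owner, tags and ETag that OzoneKey now carries. getKey is the existing lookup returning OzoneKeyDetails; ETAG is the OzoneConsts constant statically imported earlier in this hunk; the key name is illustrative.

    static void printKeyIdentity(OzoneBucket bucket) throws IOException {
      OzoneKeyDetails key = bucket.getKey("example-key");
      String owner = key.getOwner();                          // new accessor
      Map<String, String> tags = key.getTags();               // S3 object tags, new accessor
      String etag = key.getMetadata().get(OzoneConsts.ETAG);  // surfaced via metadata for files
      System.out.println(owner + " " + tags + " " + etag);
    }
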
+ */ + public Map getTags() { + return tags; + } + public void setMetadata(Map metadata) { this.metadata.putAll(metadata); } @@ -191,7 +223,8 @@ public static OzoneKey fromKeyInfo(OmKeyInfo keyInfo) { return new OzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName(), keyInfo.getDataSize(), keyInfo.getCreationTime(), keyInfo.getModificationTime(), keyInfo.getReplicationConfig(), - keyInfo.getMetadata(), keyInfo.isFile()); + keyInfo.getMetadata(), keyInfo.isFile(), keyInfo.getOwnerName(), + keyInfo.getTags()); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java index 8c29b66fd34a..168e15d9bdd4 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java @@ -53,9 +53,9 @@ public OzoneKeyDetails(String volumeName, String bucketName, String keyName, Map metadata, FileEncryptionInfo feInfo, CheckedSupplier contentSupplier, - boolean isFile) { + boolean isFile, String owner, Map tags) { super(volumeName, bucketName, keyName, size, creationTime, - modificationTime, replicationConfig, metadata, isFile); + modificationTime, replicationConfig, metadata, isFile, owner, tags); this.ozoneKeyLocations = ozoneKeyLocations; this.feInfo = feInfo; this.contentSupplier = contentSupplier; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java index c1902cdb60d2..c085720d1918 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java @@ -98,18 +98,21 @@ public ReplicationConfig getReplicationConfig() { /** * Class that represents each Part information of a multipart upload part. 
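
Hedged sketch (not in the diff) of reading the per-part ETag now exposed by PartInfo. listParts and getPartInfoList are assumed to be the existing client calls for enumerating multipart parts; the part-number marker and max-parts values are arbitrary.

    static void printPartETags(OzoneBucket bucket, String uploadID) throws IOException {
      OzoneMultipartUploadPartListParts parts =
          bucket.listParts("large-object", uploadID, 0, 100);
      for (OzoneMultipartUploadPartListParts.PartInfo part : parts.getPartInfoList()) {
        System.out.println("part " + part.getPartNumber() + " eTag=" + part.getETag());
      }
    }
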
*/ - public static class PartInfo { + public static final class PartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; + private final int partNumber; + private final String partName; + private final long modificationTime; + private final long size; + private final String eTag; - public PartInfo(int number, String name, long time, long size) { + public PartInfo(int number, String name, long time, long size, + String eTag) { this.partNumber = number; this.partName = name; this.modificationTime = time; this.size = size; + this.eTag = eTag; } public int getPartNumber() { @@ -127,5 +130,9 @@ public long getModificationTime() { public long getSize() { return size; } + + public String getETag() { + return eTag; + } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 47b50c042a27..9ab110aa2b55 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -106,7 +106,7 @@ public class OzoneVolume extends WithMetadata { private long refCount; protected OzoneVolume(Builder builder) { - this.metadata = builder.metadata; + super(builder); this.proxy = builder.proxy; this.name = builder.name; this.admin = builder.admin; @@ -409,8 +409,7 @@ public static Builder newBuilder(ConfigurationSource conf, /** * Inner builder for OzoneVolume. */ - public static class Builder { - private Map metadata; + public static class Builder extends WithMetadata.Builder { private ConfigurationSource conf; private ClientProtocol proxy; private String name; @@ -482,8 +481,9 @@ public Builder setRefCount(long refCount) { return this; } + @Override public Builder setMetadata(Map metadata) { - this.metadata = metadata; + super.setMetadata(metadata); return this; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java index 9d683c5393c2..12a649ca9206 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java @@ -18,10 +18,14 @@ package org.apache.hadoop.ozone.client; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import net.jcip.annotations.Immutable; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -30,14 +34,15 @@ * This class encapsulates the arguments that are * required for creating a volume. */ +@Immutable public final class VolumeArgs { private final String admin; private final String owner; private final long quotaInBytes; private final long quotaInNamespace; - private final List acls; - private Map metadata; + private final ImmutableList acls; + private final ImmutableMap metadata; /** * Private constructor, constructed via builder. @@ -58,8 +63,8 @@ private VolumeArgs(String admin, this.owner = owner; this.quotaInBytes = quotaInBytes; this.quotaInNamespace = quotaInNamespace; - this.acls = acls; - this.metadata = metadata; + this.acls = acls == null ? ImmutableList.of() : ImmutableList.copyOf(acls); + this.metadata = metadata == null ? 
ImmutableMap.of() : ImmutableMap.copyOf(metadata); } /** @@ -107,34 +112,20 @@ public List getAcls() { return acls; } - /** - * Returns new builder class that builds a OmVolumeArgs. - * - * @return Builder - */ public static VolumeArgs.Builder newBuilder() { return new VolumeArgs.Builder(); } /** - * Builder for OmVolumeArgs. + * Builder for VolumeArgs. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private String adminName; private String ownerName; - private long quotaInBytes; - private long quotaInNamespace; - private List listOfAcls; - private Map metadata = new HashMap<>(); - - /** - * Constructs a builder. - */ - public Builder() { - quotaInBytes = OzoneConsts.QUOTA_RESET; - quotaInNamespace = OzoneConsts.QUOTA_RESET; - } + private long quotaInBytes = OzoneConsts.QUOTA_RESET; + private long quotaInNamespace = OzoneConsts.QUOTA_RESET; + private List acls; + private Map metadata; public VolumeArgs.Builder setAdmin(String admin) { this.adminName = admin; @@ -157,12 +148,18 @@ public VolumeArgs.Builder setQuotaInNamespace(long quota) { } public VolumeArgs.Builder addMetadata(String key, String value) { + if (metadata == null) { + metadata = new HashMap<>(); + } metadata.put(key, value); return this; } - public VolumeArgs.Builder setAcls(List acls) + public VolumeArgs.Builder addAcl(OzoneAcl acl) throws IOException { - this.listOfAcls = acls; + if (acls == null) { + acls = new ArrayList<>(); + } + acls.add(acl); return this; } @@ -172,7 +169,7 @@ public VolumeArgs.Builder setAcls(List acls) */ public VolumeArgs build() { return new VolumeArgs(adminName, ownerName, quotaInBytes, - quotaInNamespace, listOfAcls, metadata); + quotaInNamespace, acls, metadata); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java index e0b82bebc3a8..b2c30ed9e08f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java @@ -25,12 +25,13 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.util.DataChecksum; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.security.MessageDigest; import java.util.List; @@ -42,8 +43,8 @@ public class ECBlockChecksumComputer extends AbstractBlockChecksumComputer { private static final Logger LOG = LoggerFactory.getLogger(ECBlockChecksumComputer.class); - private List chunkInfoList; - private OmKeyInfo keyInfo; + private final List chunkInfoList; + private final OmKeyInfo keyInfo; public ECBlockChecksumComputer( @@ -68,7 +69,7 @@ public void compute(OzoneClientConfig.ChecksumCombineMode combineMode) } - private void computeMd5Crc() throws IOException { + private void computeMd5Crc() { Preconditions.checkArgument(chunkInfoList.size() > 0); final ContainerProtos.ChunkInfo firstChunkInfo = chunkInfoList.get(0); @@ -77,32 +78,28 @@ private void computeMd5Crc() throws IOException { // Total parity checksum bytes per stripe to remove int parityBytes = getParityBytes(chunkSize, bytesPerCrc); - ByteArrayOutputStream out = new ByteArrayOutputStream(); + final MessageDigest 
digester = MD5Hash.getDigester(); for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { ByteString stripeChecksum = chunkInfo.getStripeChecksum(); Preconditions.checkNotNull(stripeChecksum); - byte[] checksumBytes = stripeChecksum.toByteArray(); - - Preconditions.checkArgument(checksumBytes.length % 4 == 0, + final int checksumSize = stripeChecksum.size(); + Preconditions.checkArgument(checksumSize % 4 == 0, "Checksum Bytes size does not match"); - ByteBuffer byteWrap = ByteBuffer - .wrap(checksumBytes, 0, checksumBytes.length - parityBytes); - byte[] currentChecksum = new byte[4]; - - while (byteWrap.hasRemaining()) { - byteWrap.get(currentChecksum); - out.write(currentChecksum); - } + final ByteBuffer byteWrap = stripeChecksum.asReadOnlyByteBuffer(); + byteWrap.limit(checksumSize - parityBytes); + digester.update(byteWrap); } - MD5Hash fileMD5 = MD5Hash.digest(out.toByteArray()); - setOutBytes(fileMD5.getDigest()); + final byte[] fileMD5 = digester.digest(); + setOutBytes(fileMD5); - LOG.debug("Number of chunks={}, md5hash={}", - chunkInfoList.size(), fileMD5); + if (LOG.isDebugEnabled()) { + LOG.debug("Number of chunks={}, md5hash={}", + chunkInfoList.size(), StringUtils.bytes2HexString(fileMD5)); + } } private void computeCompositeCrc() throws IOException { @@ -124,12 +121,10 @@ private void computeCompositeCrc() throws IOException { // Bytes required to create a CRC long bytesPerCrc = firstChunkInfo.getChecksumData().getBytesPerChecksum(); - ECReplicationConfig replicationConfig = - (ECReplicationConfig) keyInfo.getReplicationConfig(); - long chunkSize = replicationConfig.getEcChunkSize(); + long chunkSize = firstChunkInfo.getLen(); //When EC chunk size is not a multiple of ozone.client.bytes.per.checksum - // (default = 1MB) the last checksum in an EC chunk is only generated for + // (default = 16KB) the last checksum in an EC chunk is only generated for // offset. long bytesPerCrcOffset = chunkSize % bytesPerCrc; @@ -149,17 +144,15 @@ private void computeCompositeCrc() throws IOException { ByteString stripeChecksum = chunkInfo.getStripeChecksum(); Preconditions.checkNotNull(stripeChecksum); - byte[] checksumBytes = stripeChecksum.toByteArray(); - - Preconditions.checkArgument(checksumBytes.length % 4 == 0, + final int checksumSize = stripeChecksum.size(); + Preconditions.checkArgument(checksumSize % 4 == 0, "Checksum Bytes size does not match"); CrcComposer chunkCrcComposer = CrcComposer.newCrcComposer(dataChecksumType, bytesPerCrc); // Limit parity bytes as they do not contribute to fileChecksum - ByteBuffer byteWrap = ByteBuffer - .wrap(checksumBytes, 0, checksumBytes.length - parityBytes); - byte[] currentChecksum = new byte[4]; + final ByteBuffer byteWrap = stripeChecksum.asReadOnlyByteBuffer(); + byteWrap.limit(checksumSize - parityBytes); long chunkOffsetIndex = 1; while (byteWrap.hasRemaining()) { @@ -177,8 +170,7 @@ private void computeCompositeCrc() throws IOException { currentChunkOffset = bytesPerCrcOffset; } - byteWrap.get(currentChecksum); - int checksumDataCrc = CrcUtil.readInt(currentChecksum, 0); + final int checksumDataCrc = byteWrap.getInt(); //To handle last chunk when it size is lower than 1524K in the case // of rs-3-2-1524k.
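
A minimal, self-contained illustration (not part of the patch) of the buffer-based digesting used in computeMd5Crc above. It assumes the stripe checksums arrive as protobuf ByteStrings and that the trailing parityBytes of each stripe checksum must be excluded from the digest, as in the hunk.

    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
    import java.nio.ByteBuffer;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.List;

    final class StripeMd5 {
      static byte[] md5OfStripeChecksums(List<ByteString> stripeChecksums, int parityBytes)
          throws NoSuchAlgorithmException {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        for (ByteString stripeChecksum : stripeChecksums) {
          ByteBuffer view = stripeChecksum.asReadOnlyByteBuffer(); // no byte[] copy
          view.limit(stripeChecksum.size() - parityBytes);         // drop trailing parity CRCs
          md5.update(view);
        }
        return md5.digest();   // digest() also resets the digester, so call it once
      }
    }
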
long chunkSizePerChecksum = Math.min(Math.min(keySize, bytesPerCrc), diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java index cf976e3bd39c..2c0fc0c0d36a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java @@ -26,8 +26,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.MessageDigest; import java.util.List; /** @@ -39,7 +40,13 @@ public class ReplicatedBlockChecksumComputer extends private static final Logger LOG = LoggerFactory.getLogger(ReplicatedBlockChecksumComputer.class); - private List chunkInfoList; + static MD5Hash digest(ByteBuffer data) { + final MessageDigest digester = MD5Hash.getDigester(); + digester.update(data); + return new MD5Hash(digester.digest()); + } + + private final List chunkInfoList; public ReplicatedBlockChecksumComputer( List chunkInfoList) { @@ -62,20 +69,20 @@ public void compute(OzoneClientConfig.ChecksumCombineMode combineMode) } // compute the block checksum, which is the md5 of chunk checksums - private void computeMd5Crc() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - + private void computeMd5Crc() { + ByteString bytes = ByteString.EMPTY; for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { ContainerProtos.ChecksumData checksumData = chunkInfo.getChecksumData(); List checksums = checksumData.getChecksumsList(); for (ByteString checksum : checksums) { - baos.write(checksum.toByteArray()); + bytes = bytes.concat(checksum); } } - MD5Hash fileMD5 = MD5Hash.digest(baos.toByteArray()); + final MD5Hash fileMD5 = digest(bytes.asReadOnlyByteBuffer()); + setOutBytes(fileMD5.getDigest()); LOG.debug("number of chunks={}, md5out={}", @@ -121,7 +128,7 @@ private void computeCompositeCrc() throws IOException { Preconditions.checkArgument(remainingChunkSize <= checksums.size() * chunkSize); for (ByteString checksum : checksums) { - int checksumDataCrc = CrcUtil.readInt(checksum.toByteArray(), 0); + final int checksumDataCrc = checksum.asReadOnlyByteBuffer().getInt(); chunkCrcComposer.update(checksumDataCrc, Math.min(bytesPerCrc, remainingChunkSize)); remainingChunkSize -= bytesPerCrc; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java index 9bdec27f534f..ba3850ff3947 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java @@ -21,6 +21,8 @@ import java.io.OutputStream; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.hdds.client.BlockID; @@ -37,6 +39,7 @@ import org.apache.hadoop.security.token.Token; import com.google.common.annotations.VisibleForTesting; +import org.apache.ratis.util.JavaUtils; /** * A BlockOutputStreamEntry manages the data writes into the DataNodes. 
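
The same copy-avoidance idea applied to the CRC path, shown in isolation (not in the diff): each 4-byte checksum is consumed as a big-endian int straight from a read-only view of the ByteString, which matches what CrcUtil.readInt produced from a copied array.

    static int[] readCrcs(ByteString checksums) {
      ByteBuffer buf = checksums.asReadOnlyByteBuffer();
      int[] crcs = new int[buf.remaining() / 4];
      for (int i = 0; i < crcs.length; i++) {
        crcs[i] = buf.getInt();   // ByteBuffer defaults to big-endian
      }
      return crcs;
    }
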
@@ -60,33 +63,30 @@ public class BlockOutputStreamEntry extends OutputStream { private long currentPosition; private final Token token; - private BufferPool bufferPool; - private ContainerClientMetrics clientMetrics; - private StreamBufferArgs streamBufferArgs; - - @SuppressWarnings({"parameternumber", "squid:S00107"}) - BlockOutputStreamEntry( - BlockID blockID, String key, - XceiverClientFactory xceiverClientManager, - Pipeline pipeline, - long length, - BufferPool bufferPool, - Token token, - OzoneClientConfig config, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs - ) { - this.config = config; + private final BufferPool bufferPool; + private final ContainerClientMetrics clientMetrics; + private final StreamBufferArgs streamBufferArgs; + private final Supplier executorServiceSupplier; + + BlockOutputStreamEntry(Builder b) { + this.config = b.config; this.outputStream = null; - this.blockID = blockID; - this.key = key; - this.xceiverClientManager = xceiverClientManager; - this.pipeline = pipeline; - this.token = token; - this.length = length; + this.blockID = b.blockID; + this.key = b.key; + this.xceiverClientManager = b.xceiverClientManager; + this.pipeline = b.pipeline; + this.token = b.token; + this.length = b.length; this.currentPosition = 0; - this.bufferPool = bufferPool; - this.clientMetrics = clientMetrics; - this.streamBufferArgs = streamBufferArgs; + this.bufferPool = b.bufferPool; + this.clientMetrics = b.clientMetrics; + this.streamBufferArgs = b.streamBufferArgs; + this.executorServiceSupplier = b.executorServiceSupplier; + } + + @Override + public String toString() { + return JavaUtils.getClassSimpleName(getClass()) + ":" + key + " " + blockID; } /** @@ -108,13 +108,18 @@ void checkStream() throws IOException { */ void createOutputStream() throws IOException { outputStream = new RatisBlockOutputStream(blockID, xceiverClientManager, - pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs); + pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs, + executorServiceSupplier); } ContainerClientMetrics getClientMetrics() { return clientMetrics; } + Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + StreamBufferArgs getStreamBufferArgs() { return streamBufferArgs; } @@ -361,6 +366,15 @@ public static class Builder { private OzoneClientConfig config; private ContainerClientMetrics clientMetrics; private StreamBufferArgs streamBufferArgs; + private Supplier executorServiceSupplier; + + public Pipeline getPipeline() { + return pipeline; + } + + public long getLength() { + return length; + } public Builder setBlockID(BlockID bID) { this.blockID = bID; @@ -402,23 +416,24 @@ public Builder setToken(Token bToken) { this.token = bToken; return this; } + public Builder setClientMetrics(ContainerClientMetrics clientMetrics) { this.clientMetrics = clientMetrics; return this; } + public Builder setStreamBufferArgs(StreamBufferArgs streamBufferArgs) { this.streamBufferArgs = streamBufferArgs; return this; } + public Builder setExecutorServiceSupplier(Supplier executorServiceSupplier) { + this.executorServiceSupplier = executorServiceSupplier; + return this; + } + public BlockOutputStreamEntry build() { - return new BlockOutputStreamEntry(blockID, - key, - xceiverClientManager, - pipeline, - length, - bufferPool, - token, config, clientMetrics, streamBufferArgs); + return new BlockOutputStreamEntry(this); } } } diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java index 52ef31daf590..1b7918a45a71 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java @@ -25,9 +25,10 @@ import java.util.List; import java.util.ListIterator; import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.ByteStringConversion; import org.apache.hadoop.hdds.scm.ContainerClientMetrics; import org.apache.hadoop.hdds.scm.OzoneClientConfig; @@ -63,7 +64,7 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { /** * List of stream entries that are used to write a block of data. */ - private final List streamEntries; + private final List streamEntries = new ArrayList<>(); private final OzoneClientConfig config; /** * The actual stream entry we are writing into. Note that a stream entry is @@ -74,7 +75,6 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { private final OzoneManagerProtocol omClient; private final OmKeyArgs keyArgs; private final XceiverClientFactory xceiverClientFactory; - private final String requestID; /** * A {@link BufferPool} shared between all * {@link org.apache.hadoop.hdds.scm.storage.BlockOutputStream}s managed by @@ -86,42 +86,36 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { private final ExcludeList excludeList; private final ContainerClientMetrics clientMetrics; private final StreamBufferArgs streamBufferArgs; + private final Supplier executorServiceSupplier; // update blocks on OM private ContainerBlockID lastUpdatedBlockId = new ContainerBlockID(-1, -1); - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public BlockOutputStreamEntryPool( - OzoneClientConfig config, - OzoneManagerProtocol omClient, - String requestId, ReplicationConfig replicationConfig, - String uploadID, int partNumber, - boolean isMultipart, OmKeyInfo info, - boolean unsafeByteBufferConversion, - XceiverClientFactory xceiverClientFactory, long openID, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs - ) { - this.config = config; - this.xceiverClientFactory = xceiverClientFactory; - streamEntries = new ArrayList<>(); + public BlockOutputStreamEntryPool(KeyOutputStream.Builder b) { + this.config = b.getClientConfig(); + this.xceiverClientFactory = b.getXceiverManager(); currentStreamIndex = 0; - this.omClient = omClient; + this.omClient = b.getOmClient(); + final OmKeyInfo info = b.getOpenHandler().getKeyInfo(); this.keyArgs = new OmKeyArgs.Builder().setVolumeName(info.getVolumeName()) .setBucketName(info.getBucketName()).setKeyName(info.getKeyName()) - .setReplicationConfig(replicationConfig).setDataSize(info.getDataSize()) - .setIsMultipartKey(isMultipart).setMultipartUploadID(uploadID) - .setMultipartUploadPartNumber(partNumber).build(); - this.requestID = requestId; - this.openID = openID; + .setReplicationConfig(b.getReplicationConfig()) + .setDataSize(info.getDataSize()) + .setIsMultipartKey(b.isMultipartKey()) + .setMultipartUploadID(b.getMultipartUploadID()) + .setMultipartUploadPartNumber(b.getMultipartNumber()) + .build(); + 
this.openID = b.getOpenHandler().getId(); this.excludeList = createExcludeList(); + this.streamBufferArgs = b.getStreamBufferArgs(); this.bufferPool = new BufferPool(streamBufferArgs.getStreamBufferSize(), (int) (streamBufferArgs.getStreamBufferMaxSize() / streamBufferArgs .getStreamBufferSize()), ByteStringConversion - .createByteBufferConversion(unsafeByteBufferConversion)); - this.clientMetrics = clientMetrics; - this.streamBufferArgs = streamBufferArgs; + .createByteBufferConversion(b.isUnsafeByteBufferConversionEnabled())); + this.clientMetrics = b.getClientMetrics(); + this.executorServiceSupplier = b.getExecutorServiceSupplier(); } ExcludeList createExcludeList() { @@ -129,25 +123,6 @@ ExcludeList createExcludeList() { Clock.system(ZoneOffset.UTC)); } - BlockOutputStreamEntryPool(ContainerClientMetrics clientMetrics, - OzoneClientConfig clientConfig, StreamBufferArgs streamBufferArgs) { - streamEntries = new ArrayList<>(); - omClient = null; - keyArgs = null; - xceiverClientFactory = null; - config = clientConfig; - streamBufferArgs.setStreamBufferFlushDelay(false); - requestID = null; - int chunkSize = 0; - bufferPool = new BufferPool(chunkSize, 1); - - currentStreamIndex = 0; - openID = -1; - excludeList = createExcludeList(); - this.clientMetrics = clientMetrics; - this.streamBufferArgs = null; - } - /** * When a key is opened, it is possible that there are some blocks already * allocated to it for this open session. In this case, to make use of these @@ -159,10 +134,8 @@ ExcludeList createExcludeList() { * * @param version the set of blocks that are pre-allocated. * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { + public void addPreallocateBlocks(OmKeyLocationInfoGroup version, long openVersion) { // server may return any number of blocks, (0 to any) // only the blocks allocated in this open session (block createVersion // equals to open session version) @@ -193,6 +166,7 @@ BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { .setToken(subKeyInfo.getToken()) .setClientMetrics(clientMetrics) .setStreamBufferArgs(streamBufferArgs) + .setExecutorServiceSupplier(executorServiceSupplier) .build(); } @@ -263,6 +237,10 @@ StreamBufferArgs getStreamBufferArgs() { return streamBufferArgs; } + public Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + /** * Discards the subsequent pre allocated blocks and removes the streamEntries * from the streamEntries list for the container which is closed. 
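
A generic sketch (not part of the change) of the Supplier<ExecutorService> that the pool and its stream entries now share. It assumes the intent is to create a single executor lazily and hand the same instance to every entry; the class name is hypothetical, and the real wiring lives in KeyOutputStream.Builder, which this hunk does not show.

    // imports: java.util.concurrent.ExecutorService, java.util.concurrent.Executors,
    //          java.util.function.Supplier
    // Lazily creates one executor and returns the same instance on every get().
    final class LazyExecutorSupplier implements Supplier<ExecutorService> {
      private ExecutorService executor;
      @Override
      public synchronized ExecutorService get() {
        if (executor == null) {
          executor = Executors.newSingleThreadExecutor();
        }
        return executor;
      }
    }
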
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java index 07d0f46069ca..241754a57f19 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java @@ -23,17 +23,10 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.ContainerClientMetrics; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; -import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.security.token.Token; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,19 +68,10 @@ public class ECBlockOutputStreamEntry extends BlockOutputStreamEntry { private int currentStreamIdx = 0; private long successfulBlkGrpAckedLen; - @SuppressWarnings({"parameternumber", "squid:S00107"}) - ECBlockOutputStreamEntry(BlockID blockID, String key, - XceiverClientFactory xceiverClientManager, Pipeline pipeline, long length, - BufferPool bufferPool, Token token, - OzoneClientConfig config, ContainerClientMetrics clientMetrics, - StreamBufferArgs streamBufferArgs) { - super(blockID, key, xceiverClientManager, pipeline, length, bufferPool, - token, config, clientMetrics, streamBufferArgs); - assertInstanceOf( - pipeline.getReplicationConfig(), ECReplicationConfig.class); - this.replicationConfig = - (ECReplicationConfig) pipeline.getReplicationConfig(); - this.length = replicationConfig.getData() * length; + ECBlockOutputStreamEntry(Builder b) { + super(b); + this.replicationConfig = assertInstanceOf(b.getPipeline().getReplicationConfig(), ECReplicationConfig.class); + this.length = replicationConfig.getData() * b.getLength(); } @Override @@ -101,7 +85,8 @@ void checkStream() throws IOException { streams[i] = new ECBlockOutputStream(getBlockID(), getXceiverClientManager(), createSingleECBlockPipeline(getPipeline(), nodes.get(i), i + 1), - getBufferPool(), getConf(), getToken(), getClientMetrics(), getStreamBufferArgs()); + getBufferPool(), getConf(), getToken(), getClientMetrics(), getStreamBufferArgs(), + getExecutorServiceSupplier()); } blockOutputStreams = streams; } @@ -433,82 +418,9 @@ public ByteString calculateChecksum() throws IOException { /** * Builder class for ChunkGroupOutputStreamEntry. 
* */ - public static class Builder { - private BlockID blockID; - private String key; - private XceiverClientFactory xceiverClientManager; - private Pipeline pipeline; - private long length; - private BufferPool bufferPool; - private Token token; - private OzoneClientConfig config; - private ContainerClientMetrics clientMetrics; - private StreamBufferArgs streamBufferArgs; - - public ECBlockOutputStreamEntry.Builder setBlockID(BlockID bID) { - this.blockID = bID; - return this; - } - - public ECBlockOutputStreamEntry.Builder setKey(String keys) { - this.key = keys; - return this; - } - - public ECBlockOutputStreamEntry.Builder setXceiverClientManager( - XceiverClientFactory - xClientManager) { - this.xceiverClientManager = xClientManager; - return this; - } - - public ECBlockOutputStreamEntry.Builder setPipeline(Pipeline ppln) { - this.pipeline = ppln; - return this; - } - - public ECBlockOutputStreamEntry.Builder setLength(long len) { - this.length = len; - return this; - } - - public ECBlockOutputStreamEntry.Builder setBufferPool(BufferPool pool) { - this.bufferPool = pool; - return this; - } - - public ECBlockOutputStreamEntry.Builder setConfig( - OzoneClientConfig clientConfig) { - this.config = clientConfig; - return this; - } - - public ECBlockOutputStreamEntry.Builder setToken( - Token bToken) { - this.token = bToken; - return this; - } - - public ECBlockOutputStreamEntry.Builder setClientMetrics( - ContainerClientMetrics containerClientMetrics) { - this.clientMetrics = containerClientMetrics; - return this; - } - - public ECBlockOutputStreamEntry.Builder setStreamBufferArgs( - StreamBufferArgs args) { - this.streamBufferArgs = args; - return this; - } - + public static class Builder extends BlockOutputStreamEntry.Builder { public ECBlockOutputStreamEntry build() { - return new ECBlockOutputStreamEntry(blockID, - key, - xceiverClientManager, - pipeline, - length, - bufferPool, - token, config, clientMetrics, streamBufferArgs); + return new ECBlockOutputStreamEntry(this); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java index e551605d842d..6eb9aed0d3ad 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java @@ -17,19 +17,7 @@ */ package org.apache.hadoop.ozone.client.io; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.scm.ContainerClientMetrics; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; - -import java.time.Clock; -import java.time.ZoneOffset; /** * {@link BlockOutputStreamEntryPool} is responsible to manage OM communication @@ -44,37 +32,14 @@ * @see ECBlockOutputStreamEntry */ public class ECBlockOutputStreamEntryPool extends BlockOutputStreamEntryPool { - - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public ECBlockOutputStreamEntryPool(OzoneClientConfig config, - 
OzoneManagerProtocol omClient, - String requestId, - ReplicationConfig replicationConfig, - String uploadID, - int partNumber, - boolean isMultipart, - OmKeyInfo info, - boolean unsafeByteBufferConversion, - XceiverClientFactory xceiverClientFactory, - long openID, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs) { - super(config, omClient, requestId, replicationConfig, uploadID, partNumber, - isMultipart, info, unsafeByteBufferConversion, xceiverClientFactory, - openID, clientMetrics, streamBufferArgs); - assert replicationConfig instanceof ECReplicationConfig; - } - - @Override - ExcludeList createExcludeList() { - return new ExcludeList(getConfig().getExcludeNodesExpiryTime(), - Clock.system(ZoneOffset.UTC)); + public ECBlockOutputStreamEntryPool(ECKeyOutputStream.Builder builder) { + super(builder); } @Override - BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { - return - new ECBlockOutputStreamEntry.Builder() - .setBlockID(subKeyInfo.getBlockID()) + ECBlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setBlockID(subKeyInfo.getBlockID()) .setKey(getKeyName()) .setXceiverClientManager(getXceiverClientFactory()) .setPipeline(subKeyInfo.getPipeline()) @@ -84,7 +49,8 @@ BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { .setToken(subKeyInfo.getToken()) .setClientMetrics(getClientMetrics()) .setStreamBufferArgs(getStreamBufferArgs()) - .build(); + .setExecutorServiceSupplier(getExecutorServiceSupplier()); + return b.build(); } @Override diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java index b5c36474ff9e..0cb3973e0411 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java @@ -17,41 +17,16 @@ */ package org.apache.hadoop.ozone.client.io; -import java.io.IOException; -import java.nio.Buffer; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; import org.apache.hadoop.io.ByteBufferPool; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import 
org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.om.protocol.S3Auth; import org.apache.ozone.erasurecode.rawcoder.RawErasureEncoder; import org.apache.ozone.erasurecode.rawcoder.util.CodecUtil; @@ -59,6 +34,21 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + /** * ECKeyOutputStream handles the EC writes by writing the data into underlying * block output streams chunk by chunk. @@ -74,7 +64,6 @@ public final class ECKeyOutputStream extends KeyOutputStream private final int numParityBlks; private final ByteBufferPool bufferPool; private final RawErasureEncoder encoder; - private final ExecutorService flushExecutor; private final Future flushFuture; private final AtomicLong flushCheckpoint; @@ -100,22 +89,6 @@ private enum StripeWriteStatus { private long offset; // how much data has been ingested into the stream private long writeOffset; - private final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool; - - @VisibleForTesting - public List getStreamEntries() { - return blockOutputStreamEntryPool.getStreamEntries(); - } - - @VisibleForTesting - public XceiverClientFactory getXceiverClientFactory() { - return blockOutputStreamEntryPool.getXceiverClientFactory(); - } - - @VisibleForTesting - public List getLocationInfoList() { - return blockOutputStreamEntryPool.getLocationInfoList(); - } @VisibleForTesting public void insertFlushCheckpoint(long version) throws IOException { @@ -128,8 +101,7 @@ public long getFlushCheckpoint() { } private ECKeyOutputStream(Builder builder) { - super(builder.getReplicationConfig(), builder.getClientMetrics(), - builder.getClientConfig(), builder.getStreamBufferArgs()); + super(builder.getReplicationConfig(), new ECBlockOutputStreamEntryPool(builder)); this.config = builder.getClientConfig(); this.bufferPool = builder.getByteBufferPool(); // For EC, cell/chunk size and buffer size can be same for now. 
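
              The EC constructors above stop threading a dozen positional arguments through each subclass; the entry and the pool are now built straight from the shared Builder object. A minimal sketch of that builder-forwarding pattern, using hypothetical Pool/EcPool names rather than the real Ozone classes:

              // Sketch of the builder-forwarding pattern adopted above (hypothetical names,
              // not the actual Ozone classes): the EC subclass no longer re-declares every
              // field and setter; it reuses the base Builder and hands the whole builder
              // object to its constructor.
              class Pool {
                private final String key;
                private final long length;

                protected Pool(Builder b) {      // single place that reads the builder
                  this.key = b.key;
                  this.length = b.length;
                }

                static class Builder {
                  private String key;
                  private long length;

                  Builder setKey(String key) { this.key = key; return this; }
                  Builder setLength(long length) { this.length = length; return this; }
                  Pool build() { return new Pool(this); }
                }
              }

              class EcPool extends Pool {
                EcPool(Pool.Builder b) {
                  super(b);                      // shared fields all come from the one builder
                }

                static class Builder extends Pool.Builder {
                  @Override
                  EcPool build() { return new EcPool(this); }   // covariant return, as in the diff
                }
              }

              Because the setters still return the base Builder type, call sites keep a local builder variable and invoke build() on it at the end, which is exactly what the rewritten createStreamEntry below does.

              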
@@ -140,46 +112,24 @@ private ECKeyOutputStream(Builder builder) { ecChunkSize, numDataBlks, numParityBlks, bufferPool); chunkIndex = 0; ecStripeQueue = new ArrayBlockingQueue<>(config.getEcStripeQueueSize()); - OmKeyInfo info = builder.getOpenHandler().getKeyInfo(); - blockOutputStreamEntryPool = - new ECBlockOutputStreamEntryPool(config, - builder.getOmClient(), builder.getRequestID(), - builder.getReplicationConfig(), - builder.getMultipartUploadID(), builder.getMultipartNumber(), - builder.isMultipartKey(), - info, builder.isUnsafeByteBufferConversionEnabled(), - builder.getXceiverManager(), builder.getOpenHandler().getId(), - builder.getClientMetrics(), builder.getStreamBufferArgs()); this.writeOffset = 0; this.encoder = CodecUtil.createRawEncoderWithFallback( builder.getReplicationConfig()); - this.flushExecutor = Executors.newSingleThreadExecutor(); S3Auth s3Auth = builder.getS3CredentialsProvider().get(); ThreadLocal s3CredentialsProvider = builder.getS3CredentialsProvider(); - flushExecutor.submit(() -> s3CredentialsProvider.set(s3Auth)); - this.flushFuture = this.flushExecutor.submit(this::flushStripeFromQueue); + this.flushFuture = builder.getExecutorServiceSupplier().get().submit(() -> { + s3CredentialsProvider.set(s3Auth); + return flushStripeFromQueue(); + }); this.flushCheckpoint = new AtomicLong(0); this.atomicKeyCreation = builder.getAtomicKeyCreation(); } - /** - * When a key is opened, it is possible that there are some blocks already - * allocated to it for this open session. In this case, to make use of these - * blocks, we need to add these blocks to stream entries. But, a key's version - * also includes blocks from previous versions, we need to avoid adding these - * old blocks to stream entries, because these old blocks should not be picked - * for write. To do this, the following method checks that, only those - * blocks created in this particular open version are added to stream entries. - * - * @param version the set of blocks that are pre-allocated. - * @param openVersion the version corresponding to the pre-allocation. 
- * @throws IOException - */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { - blockOutputStreamEntryPool.addPreallocateBlocks(version, openVersion); + @Override + protected ECBlockOutputStreamEntryPool getBlockOutputStreamEntryPool() { + return (ECBlockOutputStreamEntryPool) super.getBlockOutputStreamEntryPool(); } /** @@ -218,6 +168,7 @@ private void rollbackAndReset(ECChunkBuffers stripe) throws IOException { final ByteBuffer[] dataBuffers = stripe.getDataBuffers(); offset -= Arrays.stream(dataBuffers).mapToInt(Buffer::limit).sum(); + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); final ECBlockOutputStreamEntry failedStreamEntry = blockOutputStreamEntryPool.getCurrentStreamEntry(); failedStreamEntry.resetToFirstEntry(); @@ -256,8 +207,7 @@ private void logStreamError(List failedStreams, private StripeWriteStatus commitStripeWrite(ECChunkBuffers stripe) throws IOException { - ECBlockOutputStreamEntry streamEntry = - blockOutputStreamEntryPool.getCurrentStreamEntry(); + final ECBlockOutputStreamEntry streamEntry = getBlockOutputStreamEntryPool().getCurrentStreamEntry(); List failedStreams = streamEntry.streamsWithWriteFailure(); if (!failedStreams.isEmpty()) { @@ -297,6 +247,7 @@ private void excludePipelineAndFailedDN(Pipeline pipeline, List failedStreams) { // Exclude the failed pipeline + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool.getExcludeList().addPipeline(pipeline.getId()); // If the failure is NOT caused by other reasons (e.g. container full), @@ -362,6 +313,7 @@ private void generateParityCells() throws IOException { } private void writeDataCells(ECChunkBuffers stripe) throws IOException { + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool.allocateBlockIfNeeded(); ByteBuffer[] dataCells = stripe.getDataBuffers(); for (int i = 0; i < numDataBlks; i++) { @@ -374,6 +326,7 @@ private void writeDataCells(ECChunkBuffers stripe) throws IOException { private void writeParityCells(ECChunkBuffers stripe) { // Move the stream entry cursor to parity block index + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool .getCurrentStreamEntry().forceToFirstParityBlock(); ByteBuffer[] parityCells = stripe.getParityBuffers(); @@ -413,7 +366,7 @@ private void handleOutputStreamWrite(ByteBuffer buffer, boolean isParity) { // The len cannot be bigger than cell buffer size. 
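
              A second effect of the constructor change above is that the stream no longer owns a dedicated single-thread flushExecutor: it borrows an ExecutorService from the builder's supplier and performs the S3-credential ThreadLocal setup inside the same submitted task, so the credentials are set on the worker thread that actually runs the flush. A rough sketch of that wiring, with a hypothetical FlushWiring helper standing in for the real ECKeyOutputStream code:

              import java.util.concurrent.Callable;
              import java.util.concurrent.ExecutorService;
              import java.util.concurrent.Future;
              import java.util.function.Supplier;

              // Hypothetical helper illustrating the flush wiring after this change.
              final class FlushWiring {
                private FlushWiring() { }

                static Future<Boolean> submitFlush(Supplier<ExecutorService> executors,
                    ThreadLocal<String> s3Auth, String auth,
                    Callable<Boolean> flushStripeFromQueue) {
                  return executors.get().submit(() -> {
                    s3Auth.set(auth);                    // runs on the worker thread, before flushing
                    return flushStripeFromQueue.call();  // drains the stripe queue
                  });
                }
              }

              Since the stream no longer owns the executor it submits to, close() can also drop the flushExecutor.shutdownNow() call, as the hunk further down shows.

              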
assert buffer.limit() <= ecChunkSize : "The buffer size: " + buffer.limit() + " should not exceed EC chunk size: " + ecChunkSize; - writeToOutputStream(blockOutputStreamEntryPool.getCurrentStreamEntry(), + writeToOutputStream(getBlockOutputStreamEntryPool().getCurrentStreamEntry(), buffer.array(), buffer.limit(), 0, isParity); } catch (Exception e) { markStreamAsFailed(e); @@ -449,8 +402,7 @@ private void handleException(BlockOutputStreamEntry streamEntry, Preconditions.checkNotNull(t); boolean containerExclusionException = checkIfContainerToExclude(t); if (containerExclusionException) { - blockOutputStreamEntryPool.getExcludeList() - .addPipeline(streamEntry.getPipeline().getId()); + getBlockOutputStreamEntryPool().getExcludeList().addPipeline(streamEntry.getPipeline().getId()); } markStreamAsFailed(exception); } @@ -460,7 +412,7 @@ private void markStreamClosed() { } private void markStreamAsFailed(Exception e) { - blockOutputStreamEntryPool.getCurrentStreamEntry().markFailed(e); + getBlockOutputStreamEntryPool().getCurrentStreamEntry().markFailed(e); } @Override @@ -470,6 +422,7 @@ public void flush() { private void closeCurrentStreamEntry() throws IOException { + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); if (!blockOutputStreamEntryPool.isEmpty()) { while (true) { try { @@ -503,6 +456,7 @@ public void close() throws IOException { return; } closed = true; + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); try { if (!closing) { // If stripe buffer is not empty, encode and flush the stripe. @@ -539,7 +493,6 @@ public void close() throws IOException { } catch (InterruptedException e) { throw new IOException("Flushing thread was interrupted", e); } finally { - flushExecutor.shutdownNow(); closeCurrentStreamEntry(); blockOutputStreamEntryPool.cleanup(); } @@ -614,20 +567,6 @@ public static void padBufferToLimit(ByteBuffer buf, int limit) { buf.position(limit); } - public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return blockOutputStreamEntryPool.getCommitUploadPartInfo(); - } - - @VisibleForTesting - public ExcludeList getExcludeList() { - return blockOutputStreamEntryPool.getExcludeList(); - } - - @Override - public Map getMetadata() { - return this.blockOutputStreamEntryPool.getMetadata(); - } - /** * Builder class of ECKeyOutputStream. 
*/ @@ -682,9 +621,8 @@ public ECKeyOutputStream build() { */ private void checkNotClosed() throws IOException { if (closing || closed) { - throw new IOException( - ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " - + blockOutputStreamEntryPool.getKeyName()); + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + + getBlockOutputStreamEntryPool().getKeyName()); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index 6b6be1abd40e..15e84cf37037 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -59,9 +60,9 @@ private static List createStreams( OmKeyInfo keyInfo, List blockInfos, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, - BlockInputStreamFactory blockStreamFactory) { + BlockInputStreamFactory blockStreamFactory, + OzoneClientConfig config) { boolean isHsyncFile = keyInfo.getMetadata().containsKey(OzoneConsts.HSYNC_CLIENT_ID); List partStreams = new ArrayList<>(); for (int i = 0; i < blockInfos.size(); i++) { @@ -99,9 +100,9 @@ private static List createStreams( omKeyLocationInfo, omKeyLocationInfo.getPipeline(), omKeyLocationInfo.getToken(), - verifyChecksum, xceiverClientFactory, - retry); + retry, + config); partStreams.add(stream); } return partStreams; @@ -125,13 +126,13 @@ private static BlockLocationInfo getBlockLocationInfo(OmKeyInfo newKeyInfo, private static LengthInputStream getFromOmKeyInfo( OmKeyInfo keyInfo, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, BlockInputStreamFactory blockStreamFactory, - List locationInfos) { + List locationInfos, + OzoneClientConfig config) { List streams = createStreams(keyInfo, - locationInfos, xceiverClientFactory, verifyChecksum, retryFunction, - blockStreamFactory); + locationInfos, xceiverClientFactory, retryFunction, + blockStreamFactory, config); KeyInputStream keyInputStream = new KeyInputStream(keyInfo.getKeyName(), streams); return new LengthInputStream(keyInputStream, keyInputStream.getLength()); @@ -142,20 +143,22 @@ private static LengthInputStream getFromOmKeyInfo( */ public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, - BlockInputStreamFactory blockStreamFactory) { + Function retryFunction, + BlockInputStreamFactory blockStreamFactory, + OzoneClientConfig config) { List keyLocationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); - return getFromOmKeyInfo(keyInfo, xceiverClientFactory, verifyChecksum, - retryFunction, blockStreamFactory, keyLocationInfos); + return getFromOmKeyInfo(keyInfo, xceiverClientFactory, + retryFunction, blockStreamFactory, keyLocationInfos, config); } public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo, - XceiverClientFactory xceiverClientFactory, boolean verifyChecksum, + XceiverClientFactory xceiverClientFactory, Function 
retryFunction, - BlockInputStreamFactory blockStreamFactory) { + BlockInputStreamFactory blockStreamFactory, + OzoneClientConfig config) { List keyLocationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); @@ -170,7 +173,8 @@ public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo, // Create a KeyInputStream for each part. for (List locationInfo : partsToBlocksMap.values()) { lengthInputStreams.add(getFromOmKeyInfo(keyInfo, xceiverClientFactory, - verifyChecksum, retryFunction, blockStreamFactory, locationInfo)); + retryFunction, blockStreamFactory, locationInfo, + config)); } return lengthInputStreams; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java index 8b128e9cd945..11c644cd5507 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java @@ -24,7 +24,9 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.ExecutorService; import java.util.function.Function; +import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.hadoop.fs.FSExceptionMessages; @@ -69,7 +71,6 @@ public class KeyOutputStream extends OutputStream implements Syncable, KeyMetadataAware { - private OzoneClientConfig config; private final ReplicationConfig replication; /** @@ -105,11 +106,8 @@ enum StreamAction { */ private boolean atomicKeyCreation; - public KeyOutputStream(ReplicationConfig replicationConfig, - ContainerClientMetrics clientMetrics, OzoneClientConfig clientConfig, - StreamBufferArgs streamBufferArgs) { + public KeyOutputStream(ReplicationConfig replicationConfig, BlockOutputStreamEntryPool blockOutputStreamEntryPool) { this.replication = replicationConfig; - this.config = clientConfig; closed = false; this.retryPolicyMap = HddsClientUtils.getExceptionList() .stream() @@ -117,18 +115,16 @@ public KeyOutputStream(ReplicationConfig replicationConfig, e -> RetryPolicies.TRY_ONCE_THEN_FAIL)); retryCount = 0; offset = 0; - blockOutputStreamEntryPool = - new BlockOutputStreamEntryPool(clientMetrics, clientConfig, streamBufferArgs); + this.blockOutputStreamEntryPool = blockOutputStreamEntryPool; } - @VisibleForTesting - public List getStreamEntries() { - return blockOutputStreamEntryPool.getStreamEntries(); + protected BlockOutputStreamEntryPool getBlockOutputStreamEntryPool() { + return blockOutputStreamEntryPool; } @VisibleForTesting - public XceiverClientFactory getXceiverClientFactory() { - return blockOutputStreamEntryPool.getXceiverClientFactory(); + public List getStreamEntries() { + return blockOutputStreamEntryPool.getStreamEntries(); } @VisibleForTesting @@ -146,39 +142,18 @@ public long getClientID() { return clientID; } - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public KeyOutputStream( - OzoneClientConfig config, - OpenKeySession handler, - XceiverClientFactory xceiverClientManager, - OzoneManagerProtocol omClient, - String requestId, ReplicationConfig replicationConfig, - String uploadID, int partNumber, boolean isMultipart, - boolean unsafeByteBufferConversion, - ContainerClientMetrics clientMetrics, - boolean atomicKeyCreation, StreamBufferArgs streamBufferArgs - ) { - this.config = config; - this.replication = replicationConfig; - blockOutputStreamEntryPool = - new BlockOutputStreamEntryPool( - 
config, - omClient, - requestId, replicationConfig, - uploadID, partNumber, - isMultipart, handler.getKeyInfo(), - unsafeByteBufferConversion, - xceiverClientManager, - handler.getId(), - clientMetrics, streamBufferArgs); + public KeyOutputStream(Builder b) { + this.replication = b.replicationConfig; + this.blockOutputStreamEntryPool = new BlockOutputStreamEntryPool(b); + final OzoneClientConfig config = b.getClientConfig(); this.retryPolicyMap = HddsClientUtils.getRetryPolicyByException( config.getMaxRetryCount(), config.getRetryInterval()); this.retryCount = 0; this.isException = false; this.writeOffset = 0; - this.clientID = handler.getId(); - this.atomicKeyCreation = atomicKeyCreation; - this.streamBufferArgs = streamBufferArgs; + this.clientID = b.getOpenHandler().getId(); + this.atomicKeyCreation = b.getAtomicKeyCreation(); + this.streamBufferArgs = b.getStreamBufferArgs(); } /** @@ -192,10 +167,8 @@ public KeyOutputStream( * * @param version the set of blocks that are pre-allocated. * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException */ - public synchronized void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { + public synchronized void addPreallocateBlocks(OmKeyLocationInfoGroup version, long openVersion) { blockOutputStreamEntryPool.addPreallocateBlocks(version, openVersion); } @@ -583,7 +556,7 @@ public synchronized void close() throws IOException { } } - public synchronized OmMultipartCommitUploadPartInfo + synchronized OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { return blockOutputStreamEntryPool.getCommitUploadPartInfo(); } @@ -615,6 +588,7 @@ public static class Builder { private ContainerClientMetrics clientMetrics; private boolean atomicKeyCreation = false; private StreamBufferArgs streamBufferArgs; + private Supplier executorServiceSupplier; public String getMultipartUploadID() { return multipartUploadID; @@ -728,21 +702,17 @@ public boolean getAtomicKeyCreation() { return atomicKeyCreation; } + public Builder setExecutorServiceSupplier(Supplier executorServiceSupplier) { + this.executorServiceSupplier = executorServiceSupplier; + return this; + } + + public Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + public KeyOutputStream build() { - return new KeyOutputStream( - clientConfig, - openHandler, - xceiverManager, - omClient, - requestID, - replicationConfig, - multipartUploadID, - multipartNumber, - isMultipartKey, - unsafeByteBufferConversion, - clientMetrics, - atomicKeyCreation, - streamBufferArgs); + return new KeyOutputStream(this); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 46e7e20b51b0..f7b84e487d0a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -327,9 +327,9 @@ List listBuckets(String volumeName, String bucketPrefix, * @param bucketName Name of the Bucket * @param keyName Name of the Key * @param size Size of the data - * @param metadata custom key value metadata + * @param metadata Custom key value metadata * @return {@link OzoneOutputStream} - * + * @deprecated Use {@link ClientProtocol#createKey(String, String, String, long, ReplicationConfig, Map)} instead. 
*/ @Deprecated OzoneOutputStream createKey(String volumeName, String bucketName, @@ -344,7 +344,7 @@ OzoneOutputStream createKey(String volumeName, String bucketName, * @param bucketName Name of the Bucket * @param keyName Name of the Key * @param size Size of the data - * @param metadata custom key value metadata + * @param metadata Custom key value metadata * @return {@link OzoneOutputStream} * */ @@ -353,6 +353,22 @@ OzoneOutputStream createKey(String volumeName, String bucketName, Map metadata) throws IOException; + /** + * Writes a key in an existing bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Name of the Key + * @param size Size of the data + * @param metadata Custom key value metadata + * @param tags Tags used for S3 object tags + * @return {@link OzoneOutputStream} + * + */ + OzoneOutputStream createKey(String volumeName, String bucketName, + String keyName, long size, ReplicationConfig replicationConfig, + Map metadata, Map tags) + throws IOException; + /** * Writes a key in an existing bucket. * @param volumeName Name of the Volume @@ -368,6 +384,22 @@ OzoneDataStreamOutput createStreamKey(String volumeName, String bucketName, Map metadata) throws IOException; + /** + * Writes a key in an existing bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Name of the Key + * @param size Size of the data + * @param metadata custom key value metadata + * @param tags Tags used for S3 object tags + * @return {@link OzoneDataStreamOutput} + * + */ + OzoneDataStreamOutput createStreamKey(String volumeName, String bucketName, + String keyName, long size, ReplicationConfig replicationConfig, + Map metadata, Map tags) + throws IOException; + /** * Reads a key from an existing bucket. * @param volumeName Name of the Volume @@ -509,10 +541,10 @@ OmMultipartInfo initiateMultipartUpload(String volumeName, String /** * Initiate Multipart upload. - * @param volumeName - * @param bucketName - * @param keyName - * @param replicationConfig + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Name of the Key + * @param replicationConfig Replication Config * @return {@link OmMultipartInfo} * @throws IOException */ @@ -520,6 +552,37 @@ OmMultipartInfo initiateMultipartUpload(String volumeName, String bucketName, String keyName, ReplicationConfig replicationConfig) throws IOException; + /** + * Initiate Multipart upload. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Name of the Key + * @param replicationConfig Replication config + * @param metadata Custom key value metadata + * @return {@link OmMultipartInfo} + * @throws IOException + */ + OmMultipartInfo initiateMultipartUpload(String volumeName, String + bucketName, String keyName, ReplicationConfig replicationConfig, + Map metadata) + throws IOException; + + /** + * Initiate Multipart upload. 
+ * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyName Name of the Key + * @param replicationConfig Replication config + * @param metadata Custom key value metadata + * @param tags Tags used for S3 object tags + * @return {@link OmMultipartInfo} + * @throws IOException + */ + OmMultipartInfo initiateMultipartUpload(String volumeName, String + bucketName, String keyName, ReplicationConfig replicationConfig, + Map metadata, Map tags) + throws IOException; + /** * Create a part key for a multipart upload key. * @param volumeName @@ -1092,6 +1155,19 @@ Map> getKeysEveryReplicas( String createSnapshot(String volumeName, String bucketName, String snapshotName) throws IOException; + /** + * Rename snapshot. + * + * @param volumeName Vol to be used + * @param bucketName Bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException; + /** * Delete snapshot. * @param volumeName vol to be used diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 7e1e6fe45602..d0266c95a26e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; +import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -145,6 +146,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -155,7 +157,6 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -195,10 +196,13 @@ public class RpcClient implements ClientProtocol { // for reconstruction. 
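
              The two MemoizedSupplier fields introduced here replace the hand-rolled double-checked locking that used to guard ecReconstructExecutor (removed at the end of this file). A small sketch of the idiom, assuming only the Ratis MemoizedSupplier calls already visible in this diff (valueOf, get, isInitialized) and a hypothetical LazyPool wrapper:

              import java.util.concurrent.ExecutorService;
              import java.util.concurrent.Executors;
              import org.apache.ratis.util.MemoizedSupplier;

              // Hypothetical wrapper showing the lazy-executor idiom: the pool is created on
              // the first get(), and close() shuts it down only if it was ever created.
              final class LazyPool implements AutoCloseable {
                private final MemoizedSupplier<ExecutorService> pool =
                    MemoizedSupplier.valueOf(Executors::newCachedThreadPool);

                ExecutorService get() {
                  return pool.get();                 // first call builds the pool, later calls reuse it
                }

                @Override
                public void close() {
                  if (pool.isInitialized()) {        // avoid creating a pool just to shut it down
                    pool.get().shutdownNow();
                  }
                }
              }

              RpcClient.close() below follows the same shape: each pool is shut down only when isInitialized() reports it was actually created.

              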
private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; + private static final int WRITE_POOL_MIN_SIZE = 1; + private final ConfigurationSource conf; private final OzoneManagerClientProtocol ozoneManagerClient; private final XceiverClientFactory xceiverClientManager; private final UserGroupInformation ugi; + private UserGroupInformation s3gUgi; private final ACLType userRights; private final ACLType groupRights; private final ClientId clientId = ClientId.randomId(); @@ -213,8 +217,9 @@ public class RpcClient implements ClientProtocol { private final ByteBufferPool byteBufferPool; private final BlockInputStreamFactory blockInputStreamFactory; private final OzoneManagerVersion omVersion; - private volatile ExecutorService ecReconstructExecutor; + private final MemoizedSupplier ecReconstructExecutor; private final ContainerClientMetrics clientMetrics; + private final MemoizedSupplier writeExecutor; private final AtomicBoolean isS3GRequest = new AtomicBoolean(false); /** @@ -237,6 +242,11 @@ public RpcClient(ConfigurationSource conf, String omServiceId) this.groupRights = aclConfig.getGroupDefaultRights(); this.clientConfig = conf.getObject(OzoneClientConfig.class); + this.ecReconstructExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, clientConfig.getEcReconstructStripeReadPoolLimit(), + "ec-reconstruct-reader-TID-%d")); + this.writeExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( + WRITE_POOL_MIN_SIZE, Integer.MAX_VALUE, "client-write-TID-%d")); OmTransport omTransport = createOmTransport(omServiceId); OzoneManagerProtocolClientSideTranslatorPB @@ -246,6 +256,9 @@ public RpcClient(ConfigurationSource conf, String omServiceId) this.ozoneManagerClient = TracingUtil.createProxy( ozoneManagerProtocolClientSideTranslatorPB, OzoneManagerClientProtocol.class, conf); + if (getThreadLocalS3Auth() != null) { + this.s3gUgi = UserGroupInformation.createRemoteUser(getThreadLocalS3Auth().getUserPrincipal()); + } dtService = omTransport.getDelegationTokenService(); ServiceInfoEx serviceInfoEx = ozoneManagerClient.getServiceInfo(); omVersion = getOmVersion(serviceInfoEx); @@ -311,8 +324,10 @@ public void onRemoval( }).build(); this.byteBufferPool = new ElasticByteBufferPool(); this.blockInputStreamFactory = BlockInputStreamFactoryImpl - .getInstance(byteBufferPool, this::getECReconstructExecutor); + .getInstance(byteBufferPool, ecReconstructExecutor); this.clientMetrics = ContainerClientMetrics.acquire(); + + TracingUtil.initTracing("client", conf); } public XceiverClientFactory getXceiverClientManager() { @@ -423,15 +438,16 @@ public void createVolume(String volumeName, VolumeArgs volArgs) List listOfAcls = new ArrayList<>(); //User ACL listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - owner, userRights, ACCESS)); + owner, ACCESS, userRights)); //Group ACLs of the User List userGroups = Arrays.asList(UserGroupInformation .createRemoteUser(owner).getGroupNames()); userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, groupRights, ACCESS))); + new OzoneAcl(ACLIdentityType.GROUP, group, ACCESS, groupRights))); //ACLs from VolumeArgs - if (volArgs.getAcls() != null) { - listOfAcls.addAll(volArgs.getAcls()); + List volumeAcls = volArgs.getAcls(); + if (volumeAcls != null) { + listOfAcls.addAll(volumeAcls); } OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); @@ -626,8 +642,7 @@ public void createBucket( ugi.getShortUserName() : bucketArgs.getOwner(); } - Boolean 
isVersionEnabled = bucketArgs.getVersioning() == null ? - Boolean.FALSE : bucketArgs.getVersioning(); + boolean isVersionEnabled = bucketArgs.getVersioning(); StorageType storageType = bucketArgs.getStorageType() == null ? StorageType.DEFAULT : bucketArgs.getStorageType(); BucketLayout bucketLayout = bucketArgs.getBucketLayout(); @@ -728,17 +743,24 @@ private static void verifySpaceQuota(long quota) throws OMException { * @return listOfAcls * */ private List getAclList() { + UserGroupInformation realUserInfo = getRealUserInfo(); + return OzoneAclUtil.getAclList(realUserInfo.getUserName(), + realUserInfo.getGroupNames(), userRights, groupRights); + } + + /** + * Helper function to get the actual operating user. + * + * @return listOfAcls + * */ + private UserGroupInformation getRealUserInfo() { + // After HDDS-5881 the user will not be different, + // as S3G uses single RpcClient. So we should be checking thread-local + // S3Auth and use it during proxy. if (ozoneManagerClient.getThreadLocalS3Auth() != null) { - UserGroupInformation aclUgi = - UserGroupInformation.createRemoteUser( - ozoneManagerClient.getThreadLocalS3Auth().getAccessID()); - return OzoneAclUtil.getAclList( - aclUgi.getUserName(), - aclUgi.getGroupNames(), - userRights, groupRights); + return s3gUgi; } - return OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - userRights, groupRights); + return ugi; } /** @@ -748,10 +770,7 @@ private List getAclList() { * @return OzoneAcl */ private OzoneAcl linkBucketDefaultAcl() { - BitSet aclRights = new BitSet(); - aclRights.set(READ.ordinal()); - aclRights.set(WRITE.ordinal()); - return new OzoneAcl(ACLIdentityType.WORLD, "", aclRights, ACCESS); + return new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, READ, WRITE); } /** @@ -964,6 +983,31 @@ public String createSnapshot(String volumeName, bucketName, snapshotName); } + /** + * Rename Snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + @Override + public void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + Preconditions.checkArgument(StringUtils.isNotBlank(volumeName), + "volume can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(bucketName), + "bucket can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(snapshotOldName), + "old snapshot name can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(snapshotNewName), + "new snapshot name can't be null or empty."); + + ozoneManagerClient.renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + } + /** * Delete Snapshot. 
* @param volumeName vol to be used @@ -1344,6 +1388,15 @@ public OzoneOutputStream createKey( ReplicationConfig replicationConfig, Map metadata) throws IOException { + return createKey(volumeName, bucketName, keyName, size, replicationConfig, + metadata, Collections.emptyMap()); + } + + @Override + public OzoneOutputStream createKey( + String volumeName, String bucketName, String keyName, long size, + ReplicationConfig replicationConfig, + Map metadata, Map tags) throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); if (checkKeyNameEnabled) { @@ -1361,9 +1414,16 @@ public OzoneOutputStream createKey( } } + if (omVersion.compareTo(OzoneManagerVersion.OBJECT_TAG) < 0) { + if (tags != null && !tags.isEmpty()) { + throw new IOException("OzoneManager does not support object tags"); + } + } + if (replicationConfig != null) { replicationConfigValidator.validate(replicationConfig); } + String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs.Builder builder = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -1372,8 +1432,10 @@ public OzoneOutputStream createKey( .setDataSize(size) .setReplicationConfig(replicationConfig) .addAllMetadataGdpr(metadata) + .addAllTags(tags) .setAcls(getAclList()) - .setLatestVersionLocation(getLatestVersionLocation); + .setLatestVersionLocation(getLatestVersionLocation) + .setOwnerName(ownerName); OpenKeySession openKey = ozoneManagerClient.openKey(builder.build()); // For bucket with layout OBJECT_STORE, when create an empty file (size=0), @@ -1392,12 +1454,29 @@ public OzoneDataStreamOutput createStreamKey( ReplicationConfig replicationConfig, Map metadata) throws IOException { + return createStreamKey(volumeName, bucketName, keyName, size, replicationConfig, + metadata, Collections.emptyMap()); + } + + @Override + public OzoneDataStreamOutput createStreamKey( + String volumeName, String bucketName, String keyName, long size, + ReplicationConfig replicationConfig, + Map metadata, Map tags) throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); if (checkKeyNameEnabled) { HddsClientUtils.verifyKeyName(keyName); } - HddsClientUtils.checkNotNull(keyName, replicationConfig); + HddsClientUtils.checkNotNull(keyName); + + if (omVersion.compareTo(OzoneManagerVersion.OBJECT_TAG) < 0) { + if (tags != null && !tags.isEmpty()) { + throw new IOException("OzoneManager does not support object tags"); + } + } + + String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs.Builder builder = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -1406,8 +1485,10 @@ public OzoneDataStreamOutput createStreamKey( .setDataSize(size) .setReplicationConfig(replicationConfig) .addAllMetadataGdpr(metadata) + .addAllTags(tags) .setSortDatanodesInPipeline(true) - .setAcls(getAclList()); + .setAcls(getAclList()) + .setOwnerName(ownerName); OpenKeySession openKey = ozoneManagerClient.openKey(builder.build()); return createDataStreamOutput(openKey); @@ -1520,6 +1601,7 @@ public OzoneInputStream getKey( .setUpdateID(keyInfo.getUpdateID()) .setParentObjectID(keyInfo.getParentObjectID()) .setFileChecksum(keyInfo.getFileChecksum()) + .setOwnerName(keyInfo.getOwnerName()) .build(); dnKeyInfo.setMetadata(keyInfo.getMetadata()); dnKeyInfo.setKeyLocationVersions(keyLocationInfoGroups); @@ -1609,20 +1691,21 @@ public List listKeys(String volumeName, String bucketName, key.getCreationTime(), key.getModificationTime(), key.getReplicationConfig(), - key.isFile())) + key.isFile(), + key.getOwnerName())) .collect(Collectors.toList()); } 
else { List keys = ozoneManagerClient.listKeys( volumeName, bucketName, prevKey, keyPrefix, maxListResult).getKeys(); - return keys.stream().map(key -> new OzoneKey( - key.getVolumeName(), + return keys.stream().map(key -> new OzoneKey(key.getVolumeName(), key.getBucketName(), key.getKeyName(), key.getDataSize(), key.getCreationTime(), key.getModificationTime(), key.getReplicationConfig(), - key.isFile())) + key.isFile(), + key.getOwnerName())) .collect(Collectors.toList()); } } @@ -1673,7 +1756,8 @@ private OzoneKeyDetails getOzoneKeyDetails(OmKeyInfo keyInfo) { keyInfo.getModificationTime(), ozoneKeyLocations, keyInfo.getReplicationConfig(), keyInfo.getMetadata(), keyInfo.getFileEncryptionInfo(), - () -> getInputStreamWithRetryFunction(keyInfo), keyInfo.isFile()); + () -> getInputStreamWithRetryFunction(keyInfo), keyInfo.isFile(), + keyInfo.getOwnerName(), keyInfo.getTags()); } @Override @@ -1752,9 +1836,11 @@ private OmKeyInfo getKeyInfo(OmKeyArgs keyArgs) throws IOException { @Override public void close() throws IOException { - if (ecReconstructExecutor != null) { - ecReconstructExecutor.shutdownNow(); - ecReconstructExecutor = null; + if (ecReconstructExecutor.isInitialized()) { + ecReconstructExecutor.get().shutdownNow(); + } + if (writeExecutor.isInitialized()) { + writeExecutor.get().shutdownNow(); } IOUtils.cleanupWithLogger(LOG, ozoneManagerClient, xceiverClientManager); keyProviderCache.invalidateAll(); @@ -1777,24 +1863,58 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, String keyName, ReplicationConfig replicationConfig) throws IOException { + return initiateMultipartUpload(volumeName, bucketName, keyName, replicationConfig, + Collections.emptyMap()); + } + + @Override + public OmMultipartInfo initiateMultipartUpload(String volumeName, + String bucketName, + String keyName, + ReplicationConfig replicationConfig, + Map metadata) + throws IOException { + return initiateMultipartUpload(volumeName, bucketName, keyName, replicationConfig, + metadata, Collections.emptyMap()); + } + + @Override + public OmMultipartInfo initiateMultipartUpload(String volumeName, + String bucketName, + String keyName, + ReplicationConfig replicationConfig, + Map metadata, + Map tags) + throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); HddsClientUtils.checkNotNull(keyName); + String ownerName = getRealUserInfo().getShortUserName(); if (omVersion .compareTo(OzoneManagerVersion.ERASURE_CODED_STORAGE_SUPPORT) < 0) { - if (replicationConfig.getReplicationType() + if (replicationConfig != null && replicationConfig.getReplicationType() == HddsProtos.ReplicationType.EC) { throw new IOException("Can not set the replication of the file to" + " Erasure Coded replication, as OzoneManager does not support" + " Erasure Coded replication."); } } + + if (omVersion.compareTo(OzoneManagerVersion.OBJECT_TAG) < 0) { + if (tags != null && !tags.isEmpty()) { + throw new IOException("OzoneManager does not support object tags"); + } + } + OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) .setReplicationConfig(replicationConfig) .setAcls(getAclList()) + .addAllMetadataGdpr(metadata) + .setOwnerName(ownerName) + .addAllTags(tags) .build(); OmMultipartInfo multipartInfo = ozoneManagerClient .initiateMultipartUpload(keyArgs); @@ -1818,6 +1938,7 @@ private OpenKeySession newMultipartOpenKey( } Preconditions.checkArgument(size >= 0, "size should be greater than or " + "equal to zero"); + String ownerName = 
getRealUserInfo().getShortUserName(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -1828,6 +1949,7 @@ private OpenKeySession newMultipartOpenKey( .setMultipartUploadPartNumber(partNumber) .setSortDatanodesInPipeline(sortDatanodesInPipeline) .setAcls(getAclList()) + .setOwnerName(ownerName) .build(); return ozoneManagerClient.openKey(keyArgs); } @@ -1838,11 +1960,16 @@ public OzoneOutputStream createMultipartKey( long size, int partNumber, String uploadID) throws IOException { final OpenKeySession openKey = newMultipartOpenKey( volumeName, bucketName, keyName, size, partNumber, uploadID, false); + return createMultipartOutputStream(openKey, uploadID, partNumber); + } + + private OzoneOutputStream createMultipartOutputStream( + OpenKeySession openKey, String uploadID, int partNumber + ) throws IOException { KeyOutputStream keyOutputStream = createKeyOutputStream(openKey) .setMultipartNumber(partNumber) .setMultipartUploadID(uploadID) .setIsMultipartKey(true) - .setAtomicKeyCreation(isS3GRequest.get()) .build(); return createOutputStream(openKey, keyOutputStream); } @@ -1858,29 +1985,25 @@ public OzoneDataStreamOutput createMultipartStreamKey( throws IOException { final OpenKeySession openKey = newMultipartOpenKey( volumeName, bucketName, keyName, size, partNumber, uploadID, true); - // Amazon S3 never adds partial objects, So for S3 requests we need to - // set atomicKeyCreation to true - // refer: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html - KeyDataStreamOutput keyOutputStream = - new KeyDataStreamOutput.Builder() - .setHandler(openKey) - .setXceiverClientManager(xceiverClientManager) - .setOmClient(ozoneManagerClient) - .setReplicationConfig(openKey.getKeyInfo().getReplicationConfig()) - .setMultipartNumber(partNumber) - .setMultipartUploadID(uploadID) - .setIsMultipartKey(true) - .enableUnsafeByteBufferConversion(unsafeByteBufferConversion) - .setConfig(clientConfig) - .setAtomicKeyCreation(isS3GRequest.get()) - .build(); - keyOutputStream - .addPreallocateBlocks( - openKey.getKeyInfo().getLatestVersionLocations(), - openKey.getOpenVersion()); - final OzoneOutputStream out = createSecureOutputStream( - openKey, keyOutputStream, null); - return new OzoneDataStreamOutput(out != null ? out : keyOutputStream); + final ByteBufferStreamOutput out; + ReplicationConfig replicationConfig = openKey.getKeyInfo().getReplicationConfig(); + if (replicationConfig.getReplicationType() == HddsProtos.ReplicationType.RATIS) { + KeyDataStreamOutput keyOutputStream = newKeyOutputStreamBuilder() + .setHandler(openKey) + .setReplicationConfig(replicationConfig) + .setMultipartNumber(partNumber) + .setMultipartUploadID(uploadID) + .setIsMultipartKey(true) + .build(); + keyOutputStream.addPreallocateBlocks( + openKey.getKeyInfo().getLatestVersionLocations(), + openKey.getOpenVersion()); + final OzoneOutputStream secureOut = createSecureOutputStream(openKey, keyOutputStream, null); + out = secureOut != null ? 
secureOut : keyOutputStream; + } else { + out = createMultipartOutputStream(openKey, uploadID, partNumber); + } + return new OzoneDataStreamOutput(out); } @Override @@ -1890,6 +2013,7 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( verifyVolumeName(volumeName); verifyBucketName(bucketName); HddsClientUtils.checkNotNull(keyName, uploadID); + String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -1897,6 +2021,7 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setKeyName(keyName) .setMultipartUploadID(uploadID) .setAcls(getAclList()) + .setOwnerName(ownerName) .build(); OmMultipartUploadCompleteList @@ -1952,7 +2077,8 @@ public OzoneMultipartUploadPartListParts listParts(String volumeName, ozoneMultipartUploadPartListParts.addPart( new OzoneMultipartUploadPartListParts.PartInfo( omPartInfo.getPartNumber(), omPartInfo.getPartName(), - omPartInfo.getModificationTime(), omPartInfo.getSize())); + omPartInfo.getModificationTime(), omPartInfo.getSize(), + omPartInfo.getETag())); } return ozoneMultipartUploadPartListParts; @@ -1993,10 +2119,12 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, @Override public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { + String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) .setAcls(getAclList()) + .setOwnerName(ownerName) .build(); ozoneManagerClient.createDirectory(keyArgs); } @@ -2069,6 +2197,7 @@ public OzoneOutputStream createFile(String volumeName, String bucketName, + " Erasure Coded replication."); } } + String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -2077,6 +2206,7 @@ public OzoneOutputStream createFile(String volumeName, String bucketName, .setReplicationConfig(replicationConfig) .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) + .setOwnerName(ownerName) .build(); OpenKeySession keySession = ozoneManagerClient.createFile(keyArgs, overWrite, recursive); @@ -2099,6 +2229,7 @@ public OzoneDataStreamOutput createStreamFile(String volumeName, String bucketName, String keyName, long size, ReplicationConfig replicationConfig, boolean overWrite, boolean recursive) throws IOException { + String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -2108,6 +2239,7 @@ public OzoneDataStreamOutput createStreamFile(String volumeName, .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setSortDatanodesInPipeline(true) + .setOwnerName(ownerName) .build(); OpenKeySession keySession = ozoneManagerClient.createFile(keyArgs, overWrite, recursive); @@ -2224,9 +2356,8 @@ private OzoneInputStream createInputStream( if (feInfo == null) { LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + .getFromOmKeyInfo(keyInfo, xceiverClientManager, retryFunction, + blockInputStreamFactory, clientConfig); try { final GDPRSymmetricKey gk = getGDPRSymmetricKey( keyInfo.getMetadata(), Cipher.DECRYPT_MODE); @@ -2241,9 +2372,8 @@ private OzoneInputStream createInputStream( } else if 
(!keyInfo.getLatestVersionLocations().isMultipartKey()) { // Regular Key with FileEncryptionInfo LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + .getFromOmKeyInfo(keyInfo, xceiverClientManager, retryFunction, + blockInputStreamFactory, clientConfig); final KeyProvider.KeyVersion decrypted = getDEK(feInfo); final CryptoInputStream cryptoIn = new CryptoInputStream(lengthInputStream.getWrappedStream(), @@ -2253,9 +2383,8 @@ private OzoneInputStream createInputStream( } else { // Multipart Key with FileEncryptionInfo List lengthInputStreams = KeyInputStream - .getStreamsFromKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + .getStreamsFromKeyInfo(keyInfo, xceiverClientManager, retryFunction, + blockInputStreamFactory, clientConfig); final KeyProvider.KeyVersion decrypted = getDEK(feInfo); List cryptoInputStreams = new ArrayList<>(); @@ -2276,25 +2405,33 @@ private OzoneDataStreamOutput createDataStreamOutput(OpenKeySession openKey) throws IOException { final ReplicationConfig replicationConfig = openKey.getKeyInfo().getReplicationConfig(); + final ByteBufferStreamOutput out; + if (replicationConfig.getReplicationType() == HddsProtos.ReplicationType.RATIS) { + KeyDataStreamOutput keyOutputStream = newKeyOutputStreamBuilder() + .setHandler(openKey) + .setReplicationConfig(replicationConfig) + .build(); + keyOutputStream.addPreallocateBlocks( + openKey.getKeyInfo().getLatestVersionLocations(), + openKey.getOpenVersion()); + final OzoneOutputStream secureOut = createSecureOutputStream(openKey, keyOutputStream, null); + out = secureOut != null ? secureOut : keyOutputStream; + } else { + out = createOutputStream(openKey); + } + return new OzoneDataStreamOutput(out); + } + + private KeyDataStreamOutput.Builder newKeyOutputStreamBuilder() { // Amazon S3 never adds partial objects, So for S3 requests we need to // set atomicKeyCreation to true // refer: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html - KeyDataStreamOutput keyOutputStream = - new KeyDataStreamOutput.Builder() - .setHandler(openKey) - .setXceiverClientManager(xceiverClientManager) - .setOmClient(ozoneManagerClient) - .setReplicationConfig(replicationConfig) - .enableUnsafeByteBufferConversion(unsafeByteBufferConversion) - .setConfig(clientConfig) - .setAtomicKeyCreation(isS3GRequest.get()) - .build(); - keyOutputStream - .addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(), - openKey.getOpenVersion()); - final OzoneOutputStream out = createSecureOutputStream( - openKey, keyOutputStream, null); - return new OzoneDataStreamOutput(out != null ? 
out : keyOutputStream); + return new KeyDataStreamOutput.Builder() + .setXceiverClientManager(xceiverClientManager) + .setOmClient(ozoneManagerClient) + .enableUnsafeByteBufferConversion(unsafeByteBufferConversion) + .setConfig(clientConfig) + .setAtomicKeyCreation(isS3GRequest.get()); } private OzoneOutputStream createOutputStream(OpenKeySession openKey) @@ -2374,6 +2511,7 @@ private KeyOutputStream.Builder createKeyOutputStream( .setConfig(clientConfig) .setAtomicKeyCreation(isS3GRequest.get()) .setClientMetrics(clientMetrics) + .setExecutorServiceSupplier(writeExecutor) .setStreamBufferArgs(streamBufferArgs); } @@ -2450,6 +2588,7 @@ public OzoneKey headS3Object(String bucketName, String keyName) public void setThreadLocalS3Auth( S3Auth ozoneSharedSecretAuth) { ozoneManagerClient.setThreadLocalS3Auth(ozoneSharedSecretAuth); + this.s3gUgi = UserGroupInformation.createRemoteUser(getThreadLocalS3Auth().getUserPrincipal()); } @Override @@ -2495,26 +2634,11 @@ public void setTimes(OzoneObj obj, String keyName, long mtime, long atime) ozoneManagerClient.setTimes(builder.build(), mtime, atime); } - public ExecutorService getECReconstructExecutor() { - // local ref to a volatile to ensure access - // to a completed initialized object - ExecutorService executor = ecReconstructExecutor; - if (executor == null) { - synchronized (this) { - executor = ecReconstructExecutor; - if (executor == null) { - ecReconstructExecutor = new ThreadPoolExecutor( - EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, - clientConfig.getEcReconstructStripeReadPoolLimit(), - 60, TimeUnit.SECONDS, new SynchronousQueue<>(), - new ThreadFactoryBuilder() - .setNameFormat("ec-reconstruct-reader-TID-%d") - .build(), - new ThreadPoolExecutor.CallerRunsPolicy()); - executor = ecReconstructExecutor; - } - } - } - return executor; + private static ExecutorService createThreadPoolExecutor( + int corePoolSize, int maximumPoolSize, String threadNameFormat) { + return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, + 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat(threadNameFormat).setDaemon(true).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java index 393e8cdb3112..caa3996a09ff 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java @@ -162,10 +162,10 @@ public void writeChunk( if (data.containsKey(blockKey)) { block = data.get(blockKey); assert block.size() == chunkInfo.getOffset(); - data.put(blockKey, block.concat(bytes)); + data.put(blockKey, block.concat(ByteString.copyFrom(bytes.asReadOnlyByteBuffer()))); } else { assert chunkInfo.getOffset() == 0; - data.put(blockKey, bytes); + data.put(blockKey, ByteString.copyFrom(bytes.asReadOnlyByteBuffer())); } fullBlockData diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java index 7e5de329d129..0d82f0f8bbb2 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java @@ -129,21 +129,26 @@ private 
ContainerProtos.ListBlockResponseProto listBlock(long containerID) { } private PutBlockResponseProto putBlock(PutBlockRequestProto putBlock) { + return PutBlockResponseProto.newBuilder() + .setCommittedBlockLength( + doPutBlock(putBlock.getBlockData())) + .build(); + } + + private GetCommittedBlockLengthResponseProto doPutBlock( + ContainerProtos.BlockData blockData) { long length = 0; - for (ChunkInfo chunk : putBlock.getBlockData().getChunksList()) { + for (ChunkInfo chunk : blockData.getChunksList()) { length += chunk.getLen(); } - datanodeStorage.putBlock(putBlock.getBlockData().getBlockID(), - putBlock.getBlockData()); + datanodeStorage.putBlock(blockData.getBlockID(), + blockData); - return PutBlockResponseProto.newBuilder() - .setCommittedBlockLength( - GetCommittedBlockLengthResponseProto.newBuilder() - .setBlockID(putBlock.getBlockData().getBlockID()) + return GetCommittedBlockLengthResponseProto.newBuilder() + .setBlockID(blockData.getBlockID()) .setBlockLength(length) - .build()) - .build(); + .build(); } private XceiverClientReply result( @@ -166,8 +171,15 @@ private WriteChunkResponseProto writeChunk( datanodeStorage .writeChunk(writeChunk.getBlockID(), writeChunk.getChunkData(), writeChunk.getData()); - return WriteChunkResponseProto.newBuilder() - .build(); + + WriteChunkResponseProto.Builder builder = + WriteChunkResponseProto.newBuilder(); + if (writeChunk.hasBlock()) { + ContainerProtos.BlockData + blockData = writeChunk.getBlock().getBlockData(); + builder.setCommittedBlockLength(doPutBlock(blockData)); + } + return builder.build(); } @Override diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java index 7760e88e484a..718e724e5854 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java @@ -63,10 +63,10 @@ public class TestECBlockOutputStreamEntry { try (XceiverClientManager manager = new XceiverClientManager(new OzoneConfiguration())) { HashSet clients = new HashSet<>(); - ECBlockOutputStreamEntry entry = new ECBlockOutputStreamEntry.Builder() - .setXceiverClientManager(manager) - .setPipeline(anECPipeline) - .build(); + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setXceiverClientManager(manager) + .setPipeline(anECPipeline); + final ECBlockOutputStreamEntry entry = b.build(); for (int i = 0; i < nodes.size(); i++) { clients.add( manager.acquireClient( @@ -101,10 +101,10 @@ public class TestECBlockOutputStreamEntry { try (XceiverClientManager manager = new XceiverClientManager(new OzoneConfiguration())) { HashSet clients = new HashSet<>(); - ECBlockOutputStreamEntry entry = new ECBlockOutputStreamEntry.Builder() - .setXceiverClientManager(manager) - .setPipeline(anECPipeline) - .build(); + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setXceiverClientManager(manager) + .setPipeline(anECPipeline); + final ECBlockOutputStreamEntry entry = b.build(); for (int i = 0; i < nodes.size(); i++) { clients.add( manager.acquireClient( diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java index 6af5c4b4e0d8..4d4a1ab4cbae 100644 
              --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java @@ -20,8 +20,10 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -39,7 +41,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.MB; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -49,6 +50,8 @@ */ public class TestKeyInputStreamEC { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testReadAgainstLargeBlockGroup() throws IOException { int dataBlocks = 10; @@ -68,10 +71,13 @@ public void testReadAgainstLargeBlockGroup() throws IOException { BlockInputStreamFactory mockStreamFactory = mock(BlockInputStreamFactory.class); when(mockStreamFactory.create(any(), any(), any(), any(), - anyBoolean(), any(), any())).thenReturn(blockInputStream); + any(), any(), any())).thenReturn(blockInputStream); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (LengthInputStream kis = KeyInputStream.getFromOmKeyInfo(keyInfo, - null, true, null, mockStreamFactory)) { + null, null, mockStreamFactory, + clientConfig)) { byte[] buf = new byte[100]; int readBytes = kis.read(buf, 0, 100); assertEquals(100, readBytes); diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 4af3fb18523d..a925df56a9f4 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -86,11 +86,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - org.mockito - mockito-inline - test - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index c6e410bb45bd..d2f68f1e4d81 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -320,6 +320,7 @@ public static boolean isReadOnly( case SetRangerServiceVersion: case CreateSnapshot: case DeleteSnapshot: + case RenameSnapshot: case SnapshotMoveDeletedKeys: case SnapshotPurge: case RecoverLease: @@ -743,6 +744,47 @@ public static String normalizeKey(String keyName, return keyName; } + /** + * Normalizes a given path up to the bucket level. + * + * This method takes a path as input and normalizes it up to the bucket level. + * It handles empty paths, removes leading slashes, and splits the path into + * segments. It then extracts the volume and bucket names, forming a + * normalized path with a single slash. Finally, any remaining segments are + * joined as the key name, returning the complete standardized path. + * + * @param path The path string to be normalized. + * @return The normalized path string. 
              
+ */ + public static String normalizePathUptoBucket(String path) { + if (path == null || path.isEmpty()) { + return OM_KEY_PREFIX; // Handle empty path + } + + // Remove leading slashes + path = path.replaceAll("^/*", ""); + + String[] segments = path.split(OM_KEY_PREFIX, -1); + + String volumeName = segments[0]; + String bucketName = segments.length > 1 ? segments[1] : ""; + + // Combine volume and bucket. + StringBuilder normalizedPath = new StringBuilder(volumeName); + if (!bucketName.isEmpty()) { + normalizedPath.append(OM_KEY_PREFIX).append(bucketName); + } + + // Add remaining segments as the key + if (segments.length > 2) { + normalizedPath.append(OM_KEY_PREFIX).append( + String.join(OM_KEY_PREFIX, + Arrays.copyOfRange(segments, 2, segments.length))); + } + + return normalizedPath.toString(); + } + /** * For a given service ID, return list of configured OM hosts. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 7ca0634949c0..26693d19c64a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -19,19 +19,30 @@ package org.apache.hadoop.ozone; -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ByteString; +import com.google.protobuf.Proto2Utils; +import net.jcip.annotations.Immutable; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.ratis.util.MemoizedSupplier; import java.util.ArrayList; import java.util.BitSet; +import java.util.Collections; +import java.util.EnumSet; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; +import java.util.function.Function; +import java.util.function.IntFunction; +import java.util.function.Supplier; + +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; /** * OzoneACL classes define bucket ACLs used in OZONE. @@ -43,93 +54,96 @@ *
* ACLs in Ozone follow this pattern. * • user:name:rw * • group:name:rw *
  • world::rw * */ -@JsonIgnoreProperties(value = {"aclBitSet"}) +@Immutable public class OzoneAcl { private static final String ACL_SCOPE_REGEX = ".*\\[(ACCESS|DEFAULT)\\]"; - private ACLIdentityType type; - private String name; - private BitSet aclBitSet; - private AclScope aclScope; - private static final List EMPTY_LIST = new ArrayList<>(0); - public static final BitSet ZERO_BITSET = new BitSet(0); + private final ACLIdentityType type; + private final String name; + @JsonIgnore + private final int aclBits; + private final AclScope aclScope; + + @JsonIgnore + private final Supplier toStringMethod; + @JsonIgnore + private final Supplier hashCodeMethod; + + public OzoneAcl(ACLIdentityType type, String name, AclScope scope, ACLType... acls) { + this(type, name, scope, toInt(acls)); + } - /** - * Default constructor. - */ - public OzoneAcl() { + public OzoneAcl(ACLIdentityType type, String name, AclScope scope, EnumSet acls) { + this(type, name, scope, toInt(acls)); } - /** - * Constructor for OzoneAcl. - * - * @param type - Type - * @param name - Name of user - * @param acl - Rights - * @param scope - AclScope - */ - public OzoneAcl(ACLIdentityType type, String name, ACLType acl, - AclScope scope) { - this.name = name; - this.aclBitSet = new BitSet(ACLType.getNoOfAcls()); - aclBitSet.set(acl.ordinal(), true); + private OzoneAcl(ACLIdentityType type, String name, AclScope scope, int acls) { + this.name = validateNameAndType(type, name); this.type = type; - if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) { - if (!name.equals(ACLIdentityType.WORLD.name()) && - !name.equals(ACLIdentityType.ANONYMOUS.name()) && - name.length() != 0) { - throw new IllegalArgumentException("Unexpected name:{" + name + - "} for type WORLD, ANONYMOUS. It should be WORLD & " + - "ANONYMOUS respectively."); - } - // For type WORLD and ANONYMOUS we allow only one acl to be set. - this.name = type.name(); + this.aclScope = scope; + this.aclBits = acls; + + this.toStringMethod = MemoizedSupplier.valueOf(() -> getType() + ":" + getName() + ":" + + ACLType.getACLString(BitSet.valueOf(getAclByteString().asReadOnlyByteBuffer())) + "[" + getAclScope() + "]"); + this.hashCodeMethod = MemoizedSupplier.valueOf(() -> Objects.hash(getName(), + BitSet.valueOf(getAclByteString().asReadOnlyByteBuffer()), getType().toString(), getAclScope())); + } + + + private static int toInt(int aclTypeOrdinal) { + return 1 << aclTypeOrdinal; + } + + private static int toInt(ACLType acl) { + return toInt(acl.ordinal()); + } + + private static int toInt(ACLType[] acls) { + if (acls == null) { + return 0; } - if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP)) - && (name.length() == 0)) { - throw new IllegalArgumentException("User or group name is required"); + int value = 0; + for (ACLType acl : acls) { + value |= toInt(acl); } - aclScope = scope; + return value; } - /** - * Constructor for OzoneAcl. - * - * @param type - Type - * @param name - Name of user - * @param acls - Rights - * @param scope - AclScope - */ - public OzoneAcl(ACLIdentityType type, String name, BitSet acls, - AclScope scope) { - Objects.requireNonNull(type); - Objects.requireNonNull(acls); - - if (acls.cardinality() > ACLType.getNoOfAcls()) { - throw new IllegalArgumentException("Acl bitset passed has unexpected " + - "size. 
bitset size:" + acls.cardinality() + ", bitset:" - + acls.toString()); + private static int toInt(Iterable acls) { + if (acls == null) { + return 0; + } + int value = 0; + for (ACLType acl : acls) { + value |= toInt(acl); } - this.aclBitSet = (BitSet) acls.clone(); + return value; + } + + private static String validateNameAndType(ACLIdentityType type, String name) { + Objects.requireNonNull(type); - this.name = name; - this.type = type; if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) { if (!name.equals(ACLIdentityType.WORLD.name()) && !name.equals(ACLIdentityType.ANONYMOUS.name()) && name.length() != 0) { - throw new IllegalArgumentException("Unexpected name:{" + name + - "} for type WORLD, ANONYMOUS. It should be WORLD & " + - "ANONYMOUS respectively."); + throw new IllegalArgumentException("Expected name " + type.name() + ", but was: " + name); } // For type WORLD and ANONYMOUS we allow only one acl to be set. - this.name = type.name(); + return type.name(); } + if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP)) && (name.length() == 0)) { - throw new IllegalArgumentException("User or group name is required"); + throw new IllegalArgumentException(type + " name is required"); } - aclScope = scope; + + return name; + } + + public OzoneAcl withScope(final AclScope scope) { + return scope == aclScope ? this + : new OzoneAcl(type, name, scope, aclBits); } /** @@ -151,7 +165,6 @@ public static OzoneAcl parseAcl(String acl) } ACLIdentityType aclType = ACLIdentityType.valueOf(parts[0].toUpperCase()); - BitSet acls = new BitSet(ACLType.getNoOfAcls()); String bits = parts[2]; @@ -166,14 +179,14 @@ public static OzoneAcl parseAcl(String acl) parts[2].indexOf("]"))); } - // Set all acl bits. + EnumSet acls = EnumSet.noneOf(ACLType.class); for (char ch : bits.toCharArray()) { - acls.set(ACLType.getACLRight(String.valueOf(ch)).ordinal()); + acls.add(ACLType.getACLRight(String.valueOf(ch))); } // TODO : Support sanitation of these user names by calling into // userAuth Interface. - return new OzoneAcl(aclType, parts[1], acls, aclScope); + return new OzoneAcl(aclType, parts[1], aclScope, acls); } /** @@ -205,44 +218,21 @@ public static OzoneAclInfo toProtobuf(OzoneAcl acl) { .setName(acl.getName()) .setType(OzoneAclType.valueOf(acl.getType().name())) .setAclScope(OzoneAclScope.valueOf(acl.getAclScope().name())) - .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray())); + .setRights(acl.getAclByteString()); return builder.build(); } public static OzoneAcl fromProtobuf(OzoneAclInfo protoAcl) { - BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray()); - return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), - protoAcl.getName(), aclRights, - AclScope.valueOf(protoAcl.getAclScope().name())); - } - - /** - * Helper function to convert a proto message of type {@link OzoneAclInfo} - * to {@link OzoneAcl} with acl scope of type ACCESS. - * - * @param protoAcl - * @return OzoneAcl - * */ - public static OzoneAcl fromProtobufWithAccessType(OzoneAclInfo protoAcl) { - BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray()); - return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), - protoAcl.getName(), aclRights, AclScope.ACCESS); - } - - /** - * Helper function to convert an {@link OzoneAcl} to proto message of type - * {@link OzoneAclInfo} with acl scope of type ACCESS. 
- * - * @param acl - * @return OzoneAclInfo - * */ - public static OzoneAclInfo toProtobufWithAccessType(OzoneAcl acl) { - OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder() - .setName(acl.getName()) - .setType(OzoneAclType.valueOf(acl.getType().name())) - .setAclScope(OzoneAclScope.ACCESS) - .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray())); - return builder.build(); + final byte[] bytes = protoAcl.getRights().toByteArray(); + if (bytes.length > 4) { + throw new AssertionError("Expected at most 4 bytes but got " + bytes.length); + } + int aclRights = 0; + for (int i = 0; i < bytes.length; i++) { + aclRights |= (bytes[i] & 0xff) << (i * 8); + } + return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), protoAcl.getName(), + AclScope.valueOf(protoAcl.getAclScope().name()), aclRights); } public AclScope getAclScope() { @@ -251,8 +241,7 @@ public AclScope getAclScope() { @Override public String toString() { - return type + ":" + name + ":" + ACLType.getACLString(aclBitSet) - + "[" + aclScope + "]"; + return toStringMethod.get(); } /** @@ -266,8 +255,7 @@ public String toString() { */ @Override public int hashCode() { - return Objects.hash(this.getName(), this.getAclBitSet(), - this.getType().toString(), this.getAclScope()); + return hashCodeMethod.get(); } /** @@ -279,21 +267,64 @@ public String getName() { return name; } - /** - * Returns Rights. - * - * @return - Rights - */ - public BitSet getAclBitSet() { - return aclBitSet; + @JsonIgnore + public boolean isEmpty() { + return aclBits == 0; + } + + @VisibleForTesting + public boolean isSet(ACLType acl) { + return (aclBits & toInt(acl)) != 0; + } + + public boolean checkAccess(ACLType acl) { + return (isSet(acl) || isSet(ALL)) && !isSet(NONE); + } + + public OzoneAcl add(OzoneAcl other) { + return apply(bits -> bits | other.aclBits); + } + + public OzoneAcl remove(OzoneAcl other) { + return apply(bits -> bits & ~other.aclBits); + } + + private OzoneAcl apply(IntFunction op) { + int applied = op.apply(aclBits); + return applied == aclBits + ? this + : new OzoneAcl(type, name, aclScope, applied); + } + + @JsonIgnore + public ByteString getAclByteString() { + // only first 9 bits are used currently + final byte first = (byte) aclBits; + final byte second = (byte) (aclBits >>> 8); + final byte[] bytes = second != 0 ? 
new byte[]{first, second} : new byte[]{first}; + return Proto2Utils.unsafeByteString(bytes); + } + + @JsonIgnore + public List getAclStringList() { + return getAclList(aclBits, ACLType::name); } public List getAclList() { - if (aclBitSet != null) { - return aclBitSet.stream().mapToObj(a -> - ACLType.values()[a]).collect(Collectors.toList()); + return getAclList(aclBits, Function.identity()); + } + + private static List getAclList(int aclBits, Function converter) { + if (aclBits == 0) { + return Collections.emptyList(); + } + final List toReturn = new ArrayList<>(Integer.bitCount(aclBits)); + for (int i = 0; i < ACLType.values().length; i++) { + if ((toInt(i) & aclBits) != 0) { + toReturn.add(converter.apply(ACLType.values()[i])); + } } - return EMPTY_LIST; + return Collections.unmodifiableList(toReturn); } /** @@ -315,24 +346,19 @@ public ACLIdentityType getType() { */ @Override public boolean equals(Object obj) { - if (obj == null) { - return false; + if (obj == this) { + return true; } - if (getClass() != obj.getClass()) { + if (obj == null || getClass() != obj.getClass()) { return false; } OzoneAcl otherAcl = (OzoneAcl) obj; return otherAcl.getName().equals(this.getName()) && otherAcl.getType().equals(this.getType()) && - otherAcl.getAclBitSet().equals(this.getAclBitSet()) && + this.aclBits == otherAcl.aclBits && otherAcl.getAclScope().equals(this.getAclScope()); } - public OzoneAcl setAclScope(AclScope scope) { - this.aclScope = scope; - return this; - } - /** * Scope of ozone acl. * */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index ec001587de54..da7dab5c6838 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -28,21 +28,9 @@ * Ozone Manager Constants. */ public final class OMConfigKeys { - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE = - "ozone.om.snapshot.sst_dumptool.pool.size"; - public static final int - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT = 1; - public static final String OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB = "ozone.om.snapshot.load.native.lib"; public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true; - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE = - "ozone.om.snapshot.sst_dumptool.buffer.size"; - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT = "8KB"; - /** * Never constructed. 
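// Illustrative sketch of the reworked, immutable OzoneAcl API shown above. Only methods
// visible in the patch are used (varargs constructor, parseAcl, checkAccess, add/remove,
// toProtobuf/fromProtobuf); the user name "alice" is made up.
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

final class OzoneAclExample {
  private OzoneAclExample() { }

  public static void main(String[] args) {
    // Rights are now kept in an int bit mask instead of a mutable BitSet.
    OzoneAcl readWrite = new OzoneAcl(ACLIdentityType.USER, "alice",
        OzoneAcl.AclScope.ACCESS, ACLType.READ, ACLType.WRITE);

    // checkAccess() honours ALL and NONE in addition to the individual bits.
    System.out.println(readWrite.checkAccess(ACLType.READ));   // true
    System.out.println(readWrite.checkAccess(ACLType.DELETE)); // false

    // add()/remove() no longer mutate the instance; they return a new OzoneAcl.
    OzoneAcl readOnly = readWrite.remove(
        new OzoneAcl(ACLIdentityType.USER, "alice",
            OzoneAcl.AclScope.ACCESS, ACLType.WRITE));
    System.out.println(readOnly.checkAccess(ACLType.WRITE));   // false

    // String and protobuf round trips still work with the compact byte encoding.
    OzoneAcl parsed = OzoneAcl.parseAcl("user:alice:rw[ACCESS]");
    OzoneAclInfo proto = OzoneAcl.toProtobuf(parsed);
    System.out.println(OzoneAcl.fromProtobuf(proto).equals(parsed)); // true
  }
}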
*/ @@ -264,6 +252,10 @@ private OMConfigKeys() { public static final boolean OZONE_OM_RATIS_SERVER_ELECTION_PRE_VOTE_DEFAULT = true; + public static final String OZONE_OM_RATIS_SERVER_CLOSE_THRESHOLD_KEY = + "ozone.om.ratis.server.close.threshold"; + public static final TimeDuration OZONE_OM_RATIS_SERVER_CLOSE_THRESHOLD_DEFAULT = + TimeDuration.valueOf(60, TimeUnit.SECONDS); // OM SnapshotProvider configurations public static final String OZONE_OM_RATIS_SNAPSHOT_DIR = @@ -294,6 +286,8 @@ private OMConfigKeys() { + "kerberos.keytab.file"; public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om" + ".kerberos.principal"; + public static final String OZONE_OM_KERBEROS_PRINCIPAL_PATTERN_KEY = + "ozone.om.kerberos.principal.pattern"; public static final String OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE = "ozone.om.http.auth.kerberos.keytab"; public static final String OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY @@ -584,13 +578,20 @@ private OMConfigKeys() { = TimeUnit.DAYS.toMillis(7); public static final String OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL - = "ozone.om.snapshot.diff.cleanup.service.run.internal"; + = "ozone.om.snapshot.diff.cleanup.service.run.interval"; + public static final String + OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL + = "ozone.om.snapshot.cache.cleanup.service.run.interval"; public static final long OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT = TimeUnit.MINUTES.toMillis(1); + public static final long + OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT + = TimeUnit.MINUTES.toMillis(1); public static final String OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT = "ozone.om.snapshot.diff.cleanup.service.timeout"; + public static final long OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT_DEFAULT = TimeUnit.MINUTES.toMillis(5); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java index 9c9a5027774f..a9fa742a108d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java @@ -20,37 +20,54 @@ import java.io.IOException; import java.util.Objects; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BasicKeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; + /** * Lightweight OmKeyInfo class. 
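// Illustrative sketch of reading the configuration keys added above. The literal
// defaults (60 s close threshold, 1-minute cleanup interval) mirror the constants
// declared in the patch; getTimeDuration() is the standard Hadoop Configuration accessor.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;

final class OmConfigExample {
  private OmConfigExample() { }

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    long closeThresholdSeconds = conf.getTimeDuration(
        OMConfigKeys.OZONE_OM_RATIS_SERVER_CLOSE_THRESHOLD_KEY,
        60, TimeUnit.SECONDS);  // mirrors OZONE_OM_RATIS_SERVER_CLOSE_THRESHOLD_DEFAULT

    long cacheCleanupMillis = conf.getTimeDuration(
        OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL,
        OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT,
        TimeUnit.MILLISECONDS);

    System.out.println(closeThresholdSeconds + "s / " + cacheCleanupMillis + "ms");
  }
}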
*/ -public class BasicOmKeyInfo { - - private String volumeName; - private String bucketName; - private String keyName; - private long dataSize; - private long creationTime; - private long modificationTime; - private ReplicationConfig replicationConfig; - private boolean isFile; - - @SuppressWarnings("parameternumber") - public BasicOmKeyInfo(String volumeName, String bucketName, String keyName, - long dataSize, long creationTime, long modificationTime, - ReplicationConfig replicationConfig, boolean isFile) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.replicationConfig = replicationConfig; - this.isFile = isFile; +public final class BasicOmKeyInfo { + + private final String volumeName; + private final String bucketName; + private final String keyName; + private final long dataSize; + private final long creationTime; + private final long modificationTime; + private final ReplicationConfig replicationConfig; + private final boolean isFile; + private final String eTag; + private String ownerName; + + private BasicOmKeyInfo(Builder b) { + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.replicationConfig = b.replicationConfig; + this.isFile = b.isFile; + this.eTag = StringUtils.isNotEmpty(b.eTag) ? b.eTag : null; + this.ownerName = b.ownerName; + } + + private BasicOmKeyInfo(OmKeyInfo b) { + this.volumeName = b.getVolumeName(); + this.bucketName = b.getBucketName(); + this.keyName = b.getKeyName(); + this.dataSize = b.getDataSize(); + this.creationTime = b.getCreationTime(); + this.modificationTime = b.getModificationTime(); + this.replicationConfig = b.getReplicationConfig(); + this.isFile = b.isFile(); + this.eTag = b.getMetadata().get(ETAG); + this.ownerName = b.getOwnerName(); } public String getVolumeName() { @@ -85,6 +102,14 @@ public boolean isFile() { return isFile; } + public String getETag() { + return eTag; + } + + public String getOwnerName() { + return ownerName; + } + /** * Builder of BasicOmKeyInfo. 
*/ @@ -97,6 +122,8 @@ public static class Builder { private long modificationTime; private ReplicationConfig replicationConfig; private boolean isFile; + private String eTag; + private String ownerName; public Builder setVolumeName(String volumeName) { this.volumeName = volumeName; @@ -138,9 +165,18 @@ public Builder setIsFile(boolean isFile) { return this; } + public Builder setETag(String etag) { + this.eTag = etag; + return this; + } + + public Builder setOwnerName(String ownerName) { + this.ownerName = ownerName; + return this; + } + public BasicOmKeyInfo build() { - return new BasicOmKeyInfo(volumeName, bucketName, keyName, dataSize, - creationTime, modificationTime, replicationConfig, isFile); + return new BasicOmKeyInfo(this); } } @@ -151,12 +187,18 @@ public BasicKeyInfo getProtobuf() { .setCreationTime(creationTime) .setModificationTime(modificationTime) .setType(replicationConfig.getReplicationType()); + if (ownerName != null) { + builder.setOwnerName(ownerName); + } if (replicationConfig instanceof ECReplicationConfig) { builder.setEcReplicationConfig( ((ECReplicationConfig) replicationConfig).toProto()); } else { builder.setFactor(ReplicationConfig.getLegacyFactor(replicationConfig)); } + if (StringUtils.isNotEmpty(eTag)) { + builder.setETag(eTag); + } return builder.build(); } @@ -181,7 +223,9 @@ public static BasicOmKeyInfo getFromProtobuf(BasicKeyInfo basicKeyInfo, basicKeyInfo.getType(), basicKeyInfo.getFactor(), basicKeyInfo.getEcReplicationConfig())) - .setIsFile(!keyName.endsWith("/")); + .setETag(basicKeyInfo.getETag()) + .setIsFile(!keyName.endsWith("/")) + .setOwnerName(basicKeyInfo.getOwnerName()); return builder.build(); } @@ -205,7 +249,9 @@ public static BasicOmKeyInfo getFromProtobuf(String volumeName, basicKeyInfo.getType(), basicKeyInfo.getFactor(), basicKeyInfo.getEcReplicationConfig())) - .setIsFile(!keyName.endsWith("/")); + .setETag(basicKeyInfo.getETag()) + .setIsFile(!keyName.endsWith("/")) + .setOwnerName(basicKeyInfo.getOwnerName()); return builder.build(); } @@ -225,7 +271,9 @@ public boolean equals(Object o) { creationTime == basicOmKeyInfo.creationTime && modificationTime == basicOmKeyInfo.modificationTime && replicationConfig.equals(basicOmKeyInfo.replicationConfig) && - isFile == basicOmKeyInfo.isFile; + Objects.equals(eTag, basicOmKeyInfo.eTag) && + isFile == basicOmKeyInfo.isFile && + ownerName.equals(basicOmKeyInfo.ownerName); } public int hashCode() { @@ -233,14 +281,6 @@ public int hashCode() { } public static BasicOmKeyInfo fromOmKeyInfo(OmKeyInfo omKeyInfo) { - return new BasicOmKeyInfo( - omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), - omKeyInfo.getKeyName(), - omKeyInfo.getDataSize(), - omKeyInfo.getCreationTime(), - omKeyInfo.getModificationTime(), - omKeyInfo.getReplicationConfig(), - omKeyInfo.isFile()); + return new BasicOmKeyInfo(omKeyInfo); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/LeaseKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/LeaseKeyInfo.java new file mode 100644 index 000000000000..a97ca6816828 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/LeaseKeyInfo.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
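// Illustrative sketch of constructing the builder-based BasicOmKeyInfo shown above,
// including the new eTag and owner fields. The volume/bucket/key names, owner and MD5
// string are made-up sample values.
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;

final class BasicOmKeyInfoExample {
  private BasicOmKeyInfoExample() { }

  public static void main(String[] args) {
    BasicOmKeyInfo keyInfo = new BasicOmKeyInfo.Builder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("dir1/key1")
        .setDataSize(4096)
        .setCreationTime(System.currentTimeMillis())
        .setModificationTime(System.currentTimeMillis())
        .setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
        .setIsFile(true)
        .setETag("9e107d9d372bb6826bd81d3542a419d6")  // new optional field
        .setOwnerName("hadoop")                        // new optional field
        .build();

    System.out.println(keyInfo.getOwnerName() + " owns " + keyInfo.getKeyName());
  }
}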
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +/** + * This class represents LeaseKeyInfo. + */ +public class LeaseKeyInfo { + private final OmKeyInfo keyInfo; + /** + * isKeyInfo = true indicates keyInfo is from keyTable. + * isKeyInfo = false indicates keyInfo is from openKeyTable. + */ + private boolean isKeyInfo; + + public LeaseKeyInfo(OmKeyInfo info, boolean isKeyInfo) { + this.keyInfo = info; + this.isKeyInfo = isKeyInfo; + } + + public boolean getIsKeyInfo() { + return this.isKeyInfo; + } + + public OmKeyInfo getKeyInfo() { + return keyInfo; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index e382377dff45..40c28ed5adee 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -44,44 +44,40 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { /** * Bucket Version flag. */ - private Boolean isVersionEnabled; + private final Boolean isVersionEnabled; /** * Type of storage to be used for this bucket. * [RAM_DISK, SSD, DISK, ARCHIVE] */ - private StorageType storageType; + private final StorageType storageType; /** * Bucket encryption key info if encryption is enabled. */ - private BucketEncryptionKeyInfo bekInfo; - private long quotaInBytes = OzoneConsts.QUOTA_RESET; - private long quotaInNamespace = OzoneConsts.QUOTA_RESET; - private boolean quotaInBytesSet = false; - private boolean quotaInNamespaceSet = false; - private DefaultReplicationConfig defaultReplicationConfig = null; + private final BucketEncryptionKeyInfo bekInfo; + private final long quotaInBytes; + private final long quotaInNamespace; + private final boolean quotaInBytesSet; + private final boolean quotaInNamespaceSet; + private final DefaultReplicationConfig defaultReplicationConfig; /** * Bucket Owner Name. */ - private String ownerName; - - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. 
- */ - @SuppressWarnings("checkstyle:ParameterNumber") - private OmBucketArgs(String volumeName, String bucketName, - Boolean isVersionEnabled, StorageType storageType, - Map metadata, String ownerName) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - this.metadata = metadata; - this.ownerName = ownerName; + private final String ownerName; + + private OmBucketArgs(Builder b) { + super(b); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.isVersionEnabled = b.isVersionEnabled; + this.storageType = b.storageType; + this.ownerName = b.ownerName; + this.defaultReplicationConfig = b.defaultReplicationConfig; + this.quotaInBytesSet = b.quotaInBytesSet; + this.quotaInBytes = quotaInBytesSet ? b.quotaInBytes : OzoneConsts.QUOTA_RESET; + this.quotaInNamespaceSet = b.quotaInNamespaceSet; + this.quotaInNamespace = quotaInNamespaceSet ? b.quotaInNamespace : OzoneConsts.QUOTA_RESET; + this.bekInfo = b.bekInfo; } /** @@ -149,7 +145,6 @@ public boolean hasQuotaInNamespace() { /** * Returns Bucket default replication config. - * @return */ public DefaultReplicationConfig getDefaultReplicationConfig() { return defaultReplicationConfig; @@ -159,30 +154,6 @@ public BucketEncryptionKeyInfo getBucketEncryptionKeyInfo() { return bekInfo; } - /** - * Sets the Bucket default replication config. - */ - private void setDefaultReplicationConfig( - DefaultReplicationConfig defaultReplicationConfig) { - this.defaultReplicationConfig = defaultReplicationConfig; - } - - private void setQuotaInBytes(long quotaInBytes) { - this.quotaInBytesSet = true; - this.quotaInBytes = quotaInBytes; - } - - private void setQuotaInNamespace(long quotaInNamespace) { - this.quotaInNamespaceSet = true; - this.quotaInNamespace = quotaInNamespace; - } - - @Deprecated - private void setBucketEncryptionKey( - BucketEncryptionKeyInfo bucketEncryptionKey) { - this.bekInfo = bucketEncryptionKey; - } - /** * Returns Bucket Owner Name. * @@ -206,7 +177,7 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.VOLUME, this.volumeName); auditMap.put(OzoneConsts.BUCKET, this.bucketName); auditMap.put(OzoneConsts.GDPR_FLAG, - this.metadata.get(OzoneConsts.GDPR_FLAG)); + getMetadata().get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.IS_VERSION_ENABLED, String.valueOf(this.isVersionEnabled)); if (this.storageType != null) { @@ -215,18 +186,38 @@ public Map toAuditMap() { if (this.ownerName != null) { auditMap.put(OzoneConsts.OWNER, this.ownerName); } + if (this.quotaInBytesSet && quotaInBytes > 0 || + (this.quotaInBytes != OzoneConsts.QUOTA_RESET)) { + auditMap.put(OzoneConsts.QUOTA_IN_BYTES, + String.valueOf(this.quotaInBytes)); + } + if (this.quotaInNamespaceSet && quotaInNamespace > 0 || + (this.quotaInNamespace != OzoneConsts.QUOTA_RESET)) { + auditMap.put(OzoneConsts.QUOTA_IN_NAMESPACE, + String.valueOf(this.quotaInNamespace)); + } + if (this.bekInfo != null) { + auditMap.put(OzoneConsts.BUCKET_ENCRYPTION_KEY, + this.bekInfo.getKeyName()); + } + if (this.defaultReplicationConfig != null) { + auditMap.put(OzoneConsts.REPLICATION_TYPE, String.valueOf( + this.defaultReplicationConfig.getType())); + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + this.defaultReplicationConfig.getReplicationConfig() + .getReplication()); + } return auditMap; } /** * Builder for OmBucketArgs. 
*/ - public static class Builder { + public static class Builder extends WithMetadata.Builder { private String volumeName; private String bucketName; private Boolean isVersionEnabled; private StorageType storageType; - private Map metadata; private boolean quotaInBytesSet = false; private long quotaInBytes; private boolean quotaInNamespaceSet = false; @@ -259,12 +250,15 @@ public Builder setIsVersionEnabled(Boolean versionFlag) { @Deprecated public Builder setBucketEncryptionKey(BucketEncryptionKeyInfo info) { - this.bekInfo = info; + if (info == null || info.getKeyName() != null) { + this.bekInfo = info; + } return this; } - public Builder addMetadata(Map metadataMap) { - this.metadata = metadataMap; + @Override + public Builder addAllMetadata(Map map) { + super.addAllMetadata(map); return this; } @@ -303,20 +297,7 @@ public Builder setOwnerName(String owner) { public OmBucketArgs build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - OmBucketArgs omBucketArgs = - new OmBucketArgs(volumeName, bucketName, isVersionEnabled, - storageType, metadata, ownerName); - omBucketArgs.setDefaultReplicationConfig(defaultReplicationConfig); - if (quotaInBytesSet) { - omBucketArgs.setQuotaInBytes(quotaInBytes); - } - if (quotaInNamespaceSet) { - omBucketArgs.setQuotaInNamespace(quotaInNamespace); - } - if (bekInfo != null && bekInfo.getKeyName() != null) { - omBucketArgs.setBucketEncryptionKey(bekInfo); - } - return omBucketArgs; + return new OmBucketArgs(this); } } @@ -348,7 +329,7 @@ public BucketArgs getProtobuf() { builder.setOwnerName(ownerName); } - if (bekInfo != null && bekInfo.getKeyName() != null) { + if (bekInfo != null) { builder.setBekInfo(OMPBHelper.convert(bekInfo)); } @@ -357,39 +338,42 @@ public BucketArgs getProtobuf() { /** * Parses BucketInfo protobuf and creates OmBucketArgs. - * @param bucketArgs * @return instance of OmBucketArgs */ public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { - OmBucketArgs omBucketArgs = - new OmBucketArgs(bucketArgs.getVolumeName(), - bucketArgs.getBucketName(), - bucketArgs.hasIsVersionEnabled() ? - bucketArgs.getIsVersionEnabled() : null, - bucketArgs.hasStorageType() ? StorageType.valueOf( - bucketArgs.getStorageType()) : null, - KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList()), - bucketArgs.hasOwnerName() ? - bucketArgs.getOwnerName() : null); - // OmBucketArgs ctor already has more arguments, so setting the default - // replication config separately. 
+ final OmBucketArgs.Builder builder = newBuilder() + .setVolumeName(bucketArgs.getVolumeName()) + .setBucketName(bucketArgs.getBucketName()) + .addAllMetadata(KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList())); + + if (bucketArgs.hasIsVersionEnabled()) { + builder.setIsVersionEnabled(bucketArgs.getIsVersionEnabled()); + } + if (bucketArgs.hasStorageType()) { + builder.setStorageType(StorageType.valueOf(bucketArgs.getStorageType())); + } + if (bucketArgs.hasOwnerName()) { + builder.setOwnerName(bucketArgs.getOwnerName()); + } + if (bucketArgs.hasDefaultReplicationConfig()) { - omBucketArgs.setDefaultReplicationConfig( + builder.setDefaultReplicationConfig( DefaultReplicationConfig.fromProto( bucketArgs.getDefaultReplicationConfig())); } if (bucketArgs.hasQuotaInBytes()) { - omBucketArgs.setQuotaInBytes(bucketArgs.getQuotaInBytes()); + builder.setQuotaInBytes(bucketArgs.getQuotaInBytes()); } if (bucketArgs.hasQuotaInNamespace()) { - omBucketArgs.setQuotaInNamespace(bucketArgs.getQuotaInNamespace()); + builder.setQuotaInNamespace(bucketArgs.getQuotaInNamespace()); } if (bucketArgs.hasBekInfo()) { - omBucketArgs.setBucketEncryptionKey( + builder.setBucketEncryptionKey( OMPBHelper.convert(bucketArgs.getBekInfo())); } - return omBucketArgs; + + return builder.build(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index cc811053eb27..5a83f6dbba62 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -19,17 +19,18 @@ import java.util.ArrayList; -import java.util.BitSet; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.stream.Collectors; +import com.google.common.collect.ImmutableList; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.hdds.utils.db.CopyObject; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.ozone.OzoneAcl; @@ -43,7 +44,7 @@ /** * A class that encapsulates Bucket Info. */ -public final class OmBucketInfo extends WithObjectID implements Auditable { +public final class OmBucketInfo extends WithObjectID implements Auditable, CopyObject { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(BucketInfo.getDefaultInstance()), OmBucketInfo::getFromProtobuf, @@ -64,7 +65,7 @@ public static Codec getCodec() { /** * ACL Information (mutable). */ - private final List acls; + private final CopyOnWriteArrayList acls; /** * Bucket Version flag. */ @@ -105,72 +106,29 @@ public static Codec getCodec() { /** * Bucket Layout. */ - private BucketLayout bucketLayout; + private final BucketLayout bucketLayout; private String owner; - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param acls - list of ACLs. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - * @param creationTime - Bucket creation time. - * @param modificationTime - Bucket modification time. - * @param metadata - metadata. 
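// Illustrative sketch of creating OmBucketArgs now that its fields are final and set
// only through the Builder (as in getFromProtobuf() above). The names, quota values and
// storage type below are made up.
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;

final class OmBucketArgsExample {
  private OmBucketArgsExample() { }

  public static void main(String[] args) {
    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setOwnerName("hadoop")
        .setStorageType(StorageType.SSD)
        .setQuotaInBytes(5L * 1024 * 1024 * 1024)  // 5 GB
        .setQuotaInNamespace(10_000)
        .build();

    // Quota flags are derived from the builder instead of post-construction setters.
    System.out.println(bucketArgs.getBucketName() + " quota: "
        + bucketArgs.getQuotaInBytes());
  }
}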
- * @param bekInfo - bucket encryption key info. - * @param sourceVolume - source volume for bucket links, null otherwise - * @param sourceBucket - source bucket for bucket links, null otherwise - * @param usedBytes - Bucket Quota Usage in bytes. - * @param quotaInBytes Bucket quota in bytes. - * @param quotaInNamespace Bucket quota in counts. - * @param bucketLayout bucket layout. - * @param owner owner of the bucket. - * @param defaultReplicationConfig default replication config. - * @param bucketLayout Bucket Layout. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private OmBucketInfo(String volumeName, - String bucketName, - List acls, - boolean isVersionEnabled, - StorageType storageType, - long creationTime, - long modificationTime, - long objectID, - long updateID, - Map metadata, - BucketEncryptionKeyInfo bekInfo, - String sourceVolume, - String sourceBucket, - long usedBytes, - long usedNamespace, - long quotaInBytes, - long quotaInNamespace, - BucketLayout bucketLayout, - String owner, - DefaultReplicationConfig defaultReplicationConfig) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.acls = acls; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.objectID = objectID; - this.updateID = updateID; - this.metadata = metadata; - this.bekInfo = bekInfo; - this.sourceVolume = sourceVolume; - this.sourceBucket = sourceBucket; - this.usedBytes = usedBytes; - this.usedNamespace = usedNamespace; - this.quotaInBytes = quotaInBytes; - this.quotaInNamespace = quotaInNamespace; - this.bucketLayout = bucketLayout; - this.owner = owner; - this.defaultReplicationConfig = defaultReplicationConfig; + private OmBucketInfo(Builder b) { + super(b); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.acls = new CopyOnWriteArrayList<>(b.acls); + this.isVersionEnabled = b.isVersionEnabled; + this.storageType = b.storageType; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.bekInfo = b.bekInfo; + this.sourceVolume = b.sourceVolume; + this.sourceBucket = b.sourceBucket; + this.usedBytes = b.usedBytes; + this.usedNamespace = b.usedNamespace; + this.quotaInBytes = b.quotaInBytes; + this.quotaInNamespace = b.quotaInNamespace; + this.bucketLayout = b.bucketLayout; + this.owner = b.owner; + this.defaultReplicationConfig = b.defaultReplicationConfig; } /** @@ -194,7 +152,7 @@ public String getBucketName() { * @return {@literal List} */ public List getAcls() { - return acls; + return ImmutableList.copyOf(acls); } /** @@ -351,7 +309,7 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.BUCKET, this.bucketName); auditMap.put(OzoneConsts.BUCKET_LAYOUT, String.valueOf(this.bucketLayout)); auditMap.put(OzoneConsts.GDPR_FLAG, - this.metadata.get(OzoneConsts.GDPR_FLAG)); + getMetadata().get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.ACLS, (this.acls != null) ? this.acls.toString() : null); auditMap.put(OzoneConsts.IS_VERSION_ENABLED, @@ -370,12 +328,21 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.USED_BYTES, String.valueOf(this.usedBytes)); auditMap.put(OzoneConsts.USED_NAMESPACE, String.valueOf(this.usedNamespace)); + auditMap.put(OzoneConsts.OWNER, this.owner); + auditMap.put(OzoneConsts.REPLICATION_TYPE, + (this.defaultReplicationConfig != null) ? 
+ String.valueOf(this.defaultReplicationConfig.getType()) : null); + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + (this.defaultReplicationConfig != null) ? + this.defaultReplicationConfig.getReplicationConfig() + .getReplication() : null); + auditMap.put(OzoneConsts.QUOTA_IN_BYTES, String.valueOf(this.quotaInBytes)); + auditMap.put(OzoneConsts.QUOTA_IN_NAMESPACE, + String.valueOf(this.quotaInNamespace)); return auditMap; } - /** - * Return a new copy of the object. - */ + @Override public OmBucketInfo copyObject() { Builder builder = toBuilder(); @@ -383,11 +350,6 @@ public OmBucketInfo copyObject() { builder.setBucketEncryptionKey(bekInfo.copy()); } - builder.acls.clear(); - acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); - if (defaultReplicationConfig != null) { builder.setDefaultReplicationConfig(defaultReplicationConfig.copy()); } @@ -396,20 +358,17 @@ public OmBucketInfo copyObject() { } public Builder toBuilder() { - return new Builder() + return new Builder(this) .setVolumeName(volumeName) .setBucketName(bucketName) .setStorageType(storageType) .setIsVersionEnabled(isVersionEnabled) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) .setBucketEncryptionKey(bekInfo) .setSourceVolume(sourceVolume) .setSourceBucket(sourceBucket) .setAcls(acls) - .addAllMetadata(metadata) .setUsedBytes(usedBytes) .setUsedNamespace(usedNamespace) .setQuotaInBytes(quotaInBytes) @@ -422,37 +381,30 @@ public Builder toBuilder() { /** * Builder for OmBucketInfo. */ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String volumeName; private String bucketName; - private List acls; - private Boolean isVersionEnabled; - private StorageType storageType; + private final List acls = new ArrayList<>(); + private boolean isVersionEnabled; + private StorageType storageType = StorageType.DISK; private long creationTime; private long modificationTime; - private long objectID; - private long updateID; - private Map metadata; private BucketEncryptionKeyInfo bekInfo; private String sourceVolume; private String sourceBucket; private long usedBytes; private long usedNamespace; - private long quotaInBytes; - private long quotaInNamespace; - private BucketLayout bucketLayout; + private long quotaInBytes = OzoneConsts.QUOTA_RESET; + private long quotaInNamespace = OzoneConsts.QUOTA_RESET; + private BucketLayout bucketLayout = BucketLayout.DEFAULT; private String owner; private DefaultReplicationConfig defaultReplicationConfig; public Builder() { - //Default values - this.acls = new ArrayList<>(); - this.isVersionEnabled = false; - this.storageType = StorageType.DISK; - this.metadata = new HashMap<>(); - this.quotaInBytes = OzoneConsts.QUOTA_RESET; - this.quotaInNamespace = OzoneConsts.QUOTA_RESET; - this.bucketLayout = BucketLayout.DEFAULT; + } + + private Builder(OmBucketInfo obj) { + super(obj); } public Builder setVolumeName(String volume) { @@ -483,7 +435,7 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } - public Builder setIsVersionEnabled(Boolean versionFlag) { + public Builder setIsVersionEnabled(boolean versionFlag) { this.isVersionEnabled = versionFlag; return this; } @@ -503,25 +455,27 @@ public Builder setModificationTime(long modifiedOn) { return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public 
Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } @@ -531,31 +485,37 @@ public Builder setBucketEncryptionKey( return this; } + /** @param volume - source volume for bucket links, null otherwise */ public Builder setSourceVolume(String volume) { this.sourceVolume = volume; return this; } + /** @param bucket - source bucket for bucket links, null otherwise */ public Builder setSourceBucket(String bucket) { this.sourceBucket = bucket; return this; } + /** @param quotaUsage - Bucket Quota Usage in bytes. */ public Builder setUsedBytes(long quotaUsage) { this.usedBytes = quotaUsage; return this; } + /** @param quotaUsage - Bucket Quota Usage in counts. */ public Builder setUsedNamespace(long quotaUsage) { this.usedNamespace = quotaUsage; return this; } + /** @param quota Bucket quota in bytes. */ public Builder setQuotaInBytes(long quota) { this.quotaInBytes = quota; return this; } + /** @param quota Bucket quota in counts. */ public Builder setQuotaInNamespace(long quota) { this.quotaInNamespace = quota; return this; @@ -585,13 +545,8 @@ public OmBucketInfo build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); Preconditions.checkNotNull(acls); - Preconditions.checkNotNull(isVersionEnabled); Preconditions.checkNotNull(storageType); - return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled, - storageType, creationTime, modificationTime, objectID, updateID, - metadata, bekInfo, sourceVolume, sourceBucket, usedBytes, - usedNamespace, quotaInBytes, quotaInNamespace, bucketLayout, owner, - defaultReplicationConfig); + return new OmBucketInfo(this); } } @@ -607,11 +562,11 @@ public BucketInfo getProtobuf() { .setStorageType(storageType.toProto()) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setUsedBytes(usedBytes) .setUsedNamespace(usedNamespace) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) .setQuotaInBytes(quotaInBytes) .setQuotaInNamespace(quotaInNamespace); if (bucketLayout != null) { @@ -739,13 +694,13 @@ public boolean equals(Object o) { Objects.equals(acls, that.acls) && Objects.equals(isVersionEnabled, that.isVersionEnabled) && storageType == that.storageType && - objectID == that.objectID && - updateID == that.updateID && + getObjectID() == that.getObjectID() && + getUpdateID() == that.getUpdateID() && usedBytes == that.usedBytes && usedNamespace == that.usedNamespace && Objects.equals(sourceVolume, that.sourceVolume) && Objects.equals(sourceBucket, that.sourceBucket) && - Objects.equals(metadata, that.metadata) && + Objects.equals(getMetadata(), that.getMetadata()) && Objects.equals(bekInfo, that.bekInfo) && Objects.equals(owner, that.owner) && Objects.equals(defaultReplicationConfig, that.defaultReplicationConfig); @@ -768,9 +723,9 @@ public String toString() { ", bekInfo=" + bekInfo + ", sourceVolume='" + sourceVolume + "'" + ", sourceBucket='" + sourceBucket + "'" + - ", objectID=" + objectID + - ", updateID=" + updateID + - ", metadata=" + metadata + + 
", objectID=" + getObjectID() + + ", updateID=" + getUpdateID() + + ", metadata=" + getMetadata() + ", usedBytes=" + usedBytes + ", usedNamespace=" + usedNamespace + ", quotaInBytes=" + quotaInBytes + diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index 3d1940bd7ce2..55e138dbd105 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -25,8 +25,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo; -import java.util.BitSet; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -49,6 +47,7 @@ public static Codec getCodec() { } private final String name; // directory name + private String owner; private final long creationTime; private final long modificationTime; @@ -56,12 +55,10 @@ public static Codec getCodec() { private final List acls; public OmDirectoryInfo(Builder builder) { + super(builder); this.name = builder.name; + this.owner = builder.owner; this.acls = builder.acls; - this.metadata = builder.metadata; - this.objectID = builder.objectID; - this.updateID = builder.updateID; - this.parentObjectID = builder.parentObjectID; this.creationTime = builder.creationTime; this.modificationTime = builder.modificationTime; } @@ -78,38 +75,35 @@ public static OmDirectoryInfo.Builder newBuilder() { /** * Builder for Directory Info. */ - public static class Builder { - private long parentObjectID; // pointer to parent directory - - private long objectID; - private long updateID; - + public static class Builder extends WithParentObjectId.Builder { private String name; + private String owner; private long creationTime; private long modificationTime; private final List acls; - private final Map metadata; public Builder() { //Default values this.acls = new LinkedList<>(); - this.metadata = new HashMap<>(); } + @Override public Builder setParentObjectID(long parentObjectId) { - this.parentObjectID = parentObjectId; + super.setParentObjectID(parentObjectId); return this; } + @Override public Builder setObjectID(long objectId) { - this.objectID = objectId; + super.setObjectID(objectId); return this; } + @Override public Builder setUpdateID(long updateId) { - this.updateID = updateId; + super.setUpdateID(updateId); return this; } @@ -118,6 +112,11 @@ public Builder setName(String dirName) { return this; } + public Builder setOwner(String ownerName) { + this.owner = ownerName; + return this; + } + public Builder setCreationTime(long newCreationTime) { this.creationTime = newCreationTime; return this; @@ -142,15 +141,15 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } @@ -164,10 +163,6 @@ public String toString() { return getPath() + ":" + getObjectID(); } - public long getParentObjectID() { - return parentObjectID; - } - public String getPath() { return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName(); } @@ -176,6 
+171,10 @@ public String getName() { return name; } + public String getOwner() { + return owner; + } + public long getCreationTime() { return creationTime; } @@ -196,10 +195,13 @@ public DirectoryInfo getProtobuf() { DirectoryInfo.newBuilder().setName(name) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentID(parentObjectID); + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) + .setParentID(getParentObjectID()); + if (owner != null) { + pib.setOwnerName(owner); + } if (acls != null) { pib.addAllAcls(OzoneAclUtil.toProtobuf(acls)); } @@ -230,6 +232,9 @@ public static OmDirectoryInfo getFromProtobuf(DirectoryInfo dirInfo) { if (dirInfo.hasUpdateID()) { opib.setUpdateID(dirInfo.getUpdateID()); } + if (dirInfo.hasOwnerName()) { + opib.setOwner(dirInfo.getOwnerName()); + } return opib.build(); } @@ -245,16 +250,17 @@ public boolean equals(Object o) { return creationTime == omDirInfo.creationTime && modificationTime == omDirInfo.modificationTime && name.equals(omDirInfo.name) && - Objects.equals(metadata, omDirInfo.metadata) && + Objects.equals(owner, omDirInfo.owner) && + Objects.equals(getMetadata(), omDirInfo.getMetadata()) && Objects.equals(acls, omDirInfo.acls) && - objectID == omDirInfo.objectID && - updateID == omDirInfo.updateID && - parentObjectID == omDirInfo.parentObjectID; + getObjectID() == omDirInfo.getObjectID() && + getUpdateID() == omDirInfo.getUpdateID() && + getParentObjectID() == omDirInfo.getParentObjectID(); } @Override public int hashCode() { - return Objects.hash(objectID, parentObjectID, name); + return Objects.hash(getObjectID(), getParentObjectID(), name); } /** @@ -264,18 +270,16 @@ public int hashCode() { public OmDirectoryInfo copyObject() { OmDirectoryInfo.Builder builder = new Builder() .setName(name) + .setOwner(owner) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setParentObjectID(parentObjectID) - .setObjectID(objectID) - .setUpdateID(updateID); - - acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); + .setAcls(acls) + .setParentObjectID(getParentObjectID()) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()); - if (metadata != null) { - builder.addAllMetadata(metadata); + if (getMetadata() != null) { + builder.addAllMetadata(getMetadata()); } return builder.build(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index 453dc3b957c0..19d5ab4fa713 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -39,45 +39,41 @@ public final class OmKeyArgs implements Auditable { private final String volumeName; private final String bucketName; private final String keyName; + private final String ownerName; private long dataSize; private final ReplicationConfig replicationConfig; private List locationInfoList; private final boolean isMultipartKey; private final String multipartUploadID; private final int multipartUploadPartNumber; - private Map metadata; - private boolean sortDatanodesInPipeline; - private List acls; - private boolean latestVersionLocation; - private boolean recursive; - 
private boolean headOp; - private boolean forceUpdateContainerCacheFromSCM; - - @SuppressWarnings("parameternumber") - private OmKeyArgs(String volumeName, String bucketName, String keyName, - long dataSize, ReplicationConfig replicationConfig, - List locationInfoList, boolean isMultipart, - String uploadID, int partNumber, - Map metadataMap, - List acls, boolean sortDatanode, - boolean latestVersionLocation, boolean recursive, boolean headOp, - boolean forceUpdateContainerCacheFromSCM) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.replicationConfig = replicationConfig; - this.locationInfoList = locationInfoList; - this.isMultipartKey = isMultipart; - this.multipartUploadID = uploadID; - this.multipartUploadPartNumber = partNumber; - this.metadata = metadataMap; - this.acls = acls; - this.sortDatanodesInPipeline = sortDatanode; - this.latestVersionLocation = latestVersionLocation; - this.recursive = recursive; - this.headOp = headOp; - this.forceUpdateContainerCacheFromSCM = forceUpdateContainerCacheFromSCM; + private final Map metadata; + private final boolean sortDatanodesInPipeline; + private final List acls; + private final boolean latestVersionLocation; + private final boolean recursive; + private final boolean headOp; + private final boolean forceUpdateContainerCacheFromSCM; + private final Map tags; + + private OmKeyArgs(Builder b) { + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.replicationConfig = b.replicationConfig; + this.locationInfoList = b.locationInfoList; + this.isMultipartKey = b.isMultipartKey; + this.multipartUploadID = b.multipartUploadID; + this.multipartUploadPartNumber = b.multipartUploadPartNumber; + this.metadata = b.metadata; + this.acls = b.acls; + this.sortDatanodesInPipeline = b.sortDatanodesInPipeline; + this.latestVersionLocation = b.latestVersionLocation; + this.recursive = b.recursive; + this.headOp = b.headOp; + this.forceUpdateContainerCacheFromSCM = b.forceUpdateContainerCacheFromSCM; + this.ownerName = b.ownerName; + this.tags = b.tags; } public boolean getIsMultipartKey() { @@ -112,6 +108,10 @@ public String getKeyName() { return keyName; } + public String getOwner() { + return ownerName; + } + public long getDataSize() { return dataSize; } @@ -124,10 +124,6 @@ public Map getMetadata() { return metadata; } - public void setMetadata(Map metadata) { - this.metadata = metadata; - } - public void setLocationInfoList(List locationInfoList) { this.locationInfoList = locationInfoList; } @@ -156,12 +152,17 @@ public boolean isForceUpdateContainerCacheFromSCM() { return forceUpdateContainerCacheFromSCM; } + public Map getTags() { + return tags; + } + @Override public Map toAuditMap() { Map auditMap = new LinkedHashMap<>(); auditMap.put(OzoneConsts.VOLUME, this.volumeName); auditMap.put(OzoneConsts.BUCKET, this.bucketName); auditMap.put(OzoneConsts.KEY, this.keyName); + auditMap.put(OzoneConsts.OWNER, this.ownerName); auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(this.dataSize)); auditMap.put(OzoneConsts.REPLICATION_CONFIG, (this.replicationConfig != null) ? 
@@ -182,6 +183,7 @@ public OmKeyArgs.Builder toBuilder() { .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) + .setOwnerName(ownerName) .setDataSize(dataSize) .setReplicationConfig(replicationConfig) .setLocationInfoList(locationInfoList) @@ -193,7 +195,8 @@ public OmKeyArgs.Builder toBuilder() { .setHeadOp(headOp) .setLatestVersionLocation(latestVersionLocation) .setAcls(acls) - .setForceUpdateContainerCacheFromSCM(forceUpdateContainerCacheFromSCM); + .setForceUpdateContainerCacheFromSCM(forceUpdateContainerCacheFromSCM) + .addAllTags(tags); } @Nonnull @@ -218,19 +221,21 @@ public static class Builder { private String volumeName; private String bucketName; private String keyName; + private String ownerName; private long dataSize; private ReplicationConfig replicationConfig; private List locationInfoList; private boolean isMultipartKey; private String multipartUploadID; private int multipartUploadPartNumber; - private Map metadata = new HashMap<>(); + private final Map metadata = new HashMap<>(); private boolean sortDatanodesInPipeline; private boolean latestVersionLocation; private List acls; private boolean recursive; private boolean headOp; private boolean forceUpdateContainerCacheFromSCM; + private final Map tags = new HashMap<>(); public Builder setVolumeName(String volume) { this.volumeName = volume; @@ -247,6 +252,11 @@ public Builder setKeyName(String key) { return this; } + public Builder setOwnerName(String owner) { + this.ownerName = owner; + return this; + } + public Builder setDataSize(long size) { this.dataSize = size; return this; @@ -300,6 +310,16 @@ public Builder addAllMetadataGdpr(Map metadatamap) { return this; } + public Builder addTag(String key, String value) { + this.tags.put(key, value); + return this; + } + + public Builder addAllTags(Map tagmap) { + this.tags.putAll(tagmap); + return this; + } + public Builder setSortDatanodesInPipeline(boolean sort) { this.sortDatanodesInPipeline = sort; return this; @@ -326,12 +346,7 @@ public Builder setForceUpdateContainerCacheFromSCM(boolean value) { } public OmKeyArgs build() { - return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, - replicationConfig, locationInfoList, isMultipartKey, - multipartUploadID, - multipartUploadPartNumber, metadata, acls, - sortDatanodesInPipeline, latestVersionLocation, recursive, headOp, - forceUpdateContainerCacheFromSCM); + return new OmKeyArgs(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index c3a1a4a3d77b..c8e7f8f60938 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -19,12 +19,13 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.BitSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import com.google.common.collect.ImmutableList; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.fs.FileChecksum; @@ -54,7 +55,7 @@ * datanode. Also, this is the metadata written to om.db on server side. 
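// Illustrative sketch of building OmKeyArgs with the new owner and S3-tag support added
// above; the volume/bucket/key names, owner and tag values are made up.
import java.util.Map;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;

final class OmKeyArgsExample {
  private OmKeyArgsExample() { }

  public static void main(String[] args) {
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("dir1/key1")
        .setOwnerName("hadoop")        // new: owner travels with the request
        .setDataSize(1024)
        .addTag("project", "ozone")    // new: S3 object tags
        .addTag("tier", "hot")
        .build();

    Map<String, String> tags = keyArgs.getTags();
    System.out.println(keyArgs.getOwner() + " -> " + tags);
  }
}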
*/ public final class OmKeyInfo extends WithParentObjectId - implements CopyObject { + implements CopyObject, WithTags { private static final Logger LOG = LoggerFactory.getLogger(OmKeyInfo.class); private static final Codec CODEC_TRUE = newCodec(true); @@ -94,51 +95,35 @@ public static Codec getCodec(boolean ignorePipeline) { * keyName is "a/b/key1" then the fileName stores "key1". */ private String fileName; + private String ownerName; /** * ACL Information. */ - private List acls; - - @SuppressWarnings("parameternumber") - OmKeyInfo(String volumeName, String bucketName, String keyName, - List versions, long dataSize, - long creationTime, long modificationTime, - ReplicationConfig replicationConfig, - Map metadata, - FileEncryptionInfo encInfo, List acls, - long objectID, long updateID, FileChecksum fileChecksum) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.keyLocationVersions = versions; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.replicationConfig = replicationConfig; - this.metadata = metadata; - this.encInfo = encInfo; - this.acls = acls; - this.objectID = objectID; - this.updateID = updateID; - this.fileChecksum = fileChecksum; - } - - @SuppressWarnings("parameternumber") - OmKeyInfo(String volumeName, String bucketName, String keyName, - String fileName, List versions, - long dataSize, long creationTime, long modificationTime, - ReplicationConfig replicationConfig, - Map metadata, - FileEncryptionInfo encInfo, List acls, - long parentObjectID, long objectID, long updateID, - FileChecksum fileChecksum, boolean isFile) { - this(volumeName, bucketName, keyName, versions, dataSize, - creationTime, modificationTime, replicationConfig, metadata, - encInfo, acls, objectID, updateID, fileChecksum); - this.fileName = fileName; - this.parentObjectID = parentObjectID; - this.isFile = isFile; + private final CopyOnWriteArrayList acls; + + /** + * Used for S3 tags. + */ + private Map tags; + + private OmKeyInfo(Builder b) { + super(b); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.keyLocationVersions = b.omKeyLocationInfoGroups; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.replicationConfig = b.replicationConfig; + this.encInfo = b.encInfo; + this.acls = new CopyOnWriteArrayList<>(b.acls); + this.fileChecksum = b.fileChecksum; + this.fileName = b.fileName; + this.isFile = b.isFile; + this.ownerName = b.ownerName; + this.tags = b.tags; } public String getVolumeName() { @@ -181,11 +166,10 @@ public String getFileName() { return fileName; } - public long getParentObjectID() { - return parentObjectID; + public String getOwnerName() { + return ownerName; } - public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() { return keyLocationVersions.size() == 0 ? 
null : keyLocationVersions.get(keyLocationVersions.size() - 1); @@ -200,10 +184,6 @@ public void setKeyLocationVersions( this.keyLocationVersions = keyLocationVersions; } - public void updateModifcationTime() { - this.modificationTime = Time.monotonicNow(); - } - public void setFile(boolean file) { isFile = file; } @@ -213,7 +193,17 @@ public boolean isFile() { } public boolean isHsync() { - return metadata.containsKey(OzoneConsts.HSYNC_CLIENT_ID); + return getMetadata().containsKey(OzoneConsts.HSYNC_CLIENT_ID); + } + + @Override + public Map getTags() { + return tags; + } + + @Override + public void setTags(Map tags) { + this.tags = tags; } /** @@ -398,7 +388,7 @@ public FileEncryptionInfo getFileEncryptionInfo() { } public List getAcls() { - return acls; + return ImmutableList.copyOf(acls); } public boolean addAcl(OzoneAcl acl) { @@ -413,10 +403,6 @@ public boolean setAcls(List newAcls) { return OzoneAclUtil.setAcl(acls, newAcls); } - public void setParentObjectID(long parentObjectID) { - this.parentObjectID = parentObjectID; - } - public void setReplicationConfig(ReplicationConfig repConfig) { this.replicationConfig = repConfig; } @@ -447,32 +433,31 @@ public String toString() { /** * Builder of OmKeyInfo. */ - public static class Builder { + public static class Builder extends WithParentObjectId.Builder { private String volumeName; private String bucketName; private String keyName; + private String ownerName; private long dataSize; - private List omKeyLocationInfoGroups = + private final List omKeyLocationInfoGroups = new ArrayList<>(); private long creationTime; private long modificationTime; private ReplicationConfig replicationConfig; - private Map metadata; private FileEncryptionInfo encInfo; - private List acls; - private long objectID; - private long updateID; + private final List acls = new ArrayList<>(); // not persisted to DB. FileName will be the last element in path keyName. 
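Since getAcls() now hands out an ImmutableList copy of the internal CopyOnWriteArrayList, callers can read but no longer mutate a key's ACLs through the returned list. A short sketch of the resulting contract; keyInfo and newAcl are assumed to be supplied by the caller:

    static void grantAcl(OmKeyInfo keyInfo, OzoneAcl newAcl) {
      List<OzoneAcl> snapshot = keyInfo.getAcls();   // immutable copy; snapshot.add(...) would throw
      // Mutations go through the object itself and report whether anything changed.
      boolean changed = keyInfo.addAcl(newAcl);
    }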
private String fileName; - private long parentObjectID; private FileChecksum fileChecksum; private boolean isFile; + private final Map tags = new HashMap<>(); public Builder() { - this.metadata = new HashMap<>(); - omKeyLocationInfoGroups = new ArrayList<>(); - acls = new ArrayList<>(); + } + + public Builder(OmKeyInfo obj) { + super(obj); } public Builder setVolumeName(String volume) { @@ -490,6 +475,11 @@ public Builder setKeyName(String key) { return this; } + public Builder setOwnerName(String owner) { + this.ownerName = owner; + return this; + } + public Builder setOmKeyLocationInfos( List omKeyLocationInfoList) { if (omKeyLocationInfoList != null) { @@ -526,13 +516,15 @@ public Builder setReplicationConfig(ReplicationConfig replConfig) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map newMetadata) { - metadata.putAll(newMetadata); + super.addAllMetadata(newMetadata); return this; } @@ -555,13 +547,15 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -570,8 +564,9 @@ public Builder setFileName(String keyFileName) { return this; } + @Override public Builder setParentObjectID(long parentID) { - this.parentObjectID = parentID; + super.setParentObjectID(parentID); return this; } @@ -585,12 +580,18 @@ public Builder setFile(boolean isAFile) { return this; } + public Builder addTag(String key, String value) { + tags.put(key, value); + return this; + } + + public Builder addAllTags(Map keyTags) { + tags.putAll(keyTags); + return this; + } + public OmKeyInfo build() { - return new OmKeyInfo( - volumeName, bucketName, keyName, fileName, - omKeyLocationInfoGroups, dataSize, creationTime, - modificationTime, replicationConfig, metadata, encInfo, acls, - parentObjectID, objectID, updateID, fileChecksum, isFile); + return new OmKeyInfo(this); } } @@ -674,11 +675,12 @@ private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName, .addAllKeyLocationList(keyLocations) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) + .addAllTags(KeyValueUtil.toProtobuf(getTags())) .addAllAcls(OzoneAclUtil.toProtobuf(acls)) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentID(parentObjectID); + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) + .setParentID(getParentObjectID()); FileChecksumProto fileChecksumProto = OMPBHelper.convert(fileChecksum); if (fileChecksumProto != null) { @@ -693,6 +695,9 @@ private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName, kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo)); } kb.setIsFile(isFile); + if (ownerName != null) { + kb.setOwnerName(ownerName); + } return kb.build(); } @@ -719,6 +724,7 @@ public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) throws IOException { .fromProto(keyInfo.getType(), keyInfo.getFactor(), keyInfo.getEcReplicationConfig())) .addAllMetadata(KeyValueUtil.getFromProtobuf(keyInfo.getMetadataList())) + .addAllTags(KeyValueUtil.getFromProtobuf(keyInfo.getTagsList())) .setFileEncryptionInfo(keyInfo.hasFileEncryptionInfo() ? 
OMPBHelper.convert(keyInfo.getFileEncryptionInfo()) : null) .setAcls(OzoneAclUtil.fromProtobuf(keyInfo.getAclsList())); @@ -740,6 +746,9 @@ public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) throws IOException { builder.setFile(keyInfo.getIsFile()); } + if (keyInfo.hasOwnerName()) { + builder.setOwnerName(keyInfo.getOwnerName()); + } // not persisted to DB. FileName will be filtered out from keyName builder.setFileName(OzoneFSUtils.getFileName(keyInfo.getKeyName())); return builder.build(); @@ -751,10 +760,11 @@ public String getObjectInfo() { "volume='" + volumeName + '\'' + ", bucket='" + bucketName + '\'' + ", key='" + keyName + '\'' + + ", owner='" + ownerName + '\'' + ", dataSize='" + dataSize + '\'' + ", creationTime='" + creationTime + '\'' + - ", objectID='" + objectID + '\'' + - ", parentID='" + parentObjectID + '\'' + + ", objectID='" + getObjectID() + '\'' + + ", parentID='" + getParentObjectID() + '\'' + ", replication='" + replicationConfig + '\'' + ", fileChecksum='" + fileChecksum + '}'; @@ -764,18 +774,19 @@ public String getObjectInfo() { public boolean isKeyInfoSame(OmKeyInfo omKeyInfo, boolean checkPath, boolean checkKeyLocationVersions, boolean checkModificationTime, - boolean checkUpdateID) { + boolean checkUpdateID, + boolean checkOwnerName) { boolean isEqual = dataSize == omKeyInfo.dataSize && creationTime == omKeyInfo.creationTime && volumeName.equals(omKeyInfo.volumeName) && bucketName.equals(omKeyInfo.bucketName) && replicationConfig.equals(omKeyInfo.replicationConfig) && - Objects.equals(metadata, omKeyInfo.metadata) && + Objects.equals(getMetadata(), omKeyInfo.getMetadata()) && Objects.equals(acls, omKeyInfo.acls) && - objectID == omKeyInfo.objectID; + getObjectID() == omKeyInfo.getObjectID(); if (isEqual && checkUpdateID) { - isEqual = updateID == omKeyInfo.updateID; + isEqual = getUpdateID() == omKeyInfo.getUpdateID(); } if (isEqual && checkModificationTime) { @@ -783,7 +794,7 @@ public boolean isKeyInfoSame(OmKeyInfo omKeyInfo, boolean checkPath, } if (isEqual && checkPath) { - isEqual = parentObjectID == omKeyInfo.parentObjectID && + isEqual = getParentObjectID() == omKeyInfo.getParentObjectID() && keyName.equals(omKeyInfo.keyName); } @@ -792,6 +803,11 @@ public boolean isKeyInfoSame(OmKeyInfo omKeyInfo, boolean checkPath, .equals(keyLocationVersions, omKeyInfo.keyLocationVersions); } + if (isEqual && checkOwnerName) { + isEqual = Objects + .equals(ownerName, omKeyInfo.ownerName); + } + return isEqual; } @@ -803,12 +819,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - return isKeyInfoSame((OmKeyInfo) o, true, true, true, true); + return isKeyInfoSame((OmKeyInfo) o, true, true, true, true, true); } @Override public int hashCode() { - return Objects.hash(volumeName, bucketName, keyName, parentObjectID); + return Objects.hash(volumeName, bucketName, keyName, getParentObjectID()); } /** @@ -816,18 +832,17 @@ public int hashCode() { */ @Override public OmKeyInfo copyObject() { - OmKeyInfo.Builder builder = new OmKeyInfo.Builder() + OmKeyInfo.Builder builder = new OmKeyInfo.Builder(this) .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) + .setOwnerName(ownerName) .setCreationTime(creationTime) .setModificationTime(modificationTime) .setDataSize(dataSize) .setReplicationConfig(replicationConfig) .setFileEncryptionInfo(encInfo) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentObjectID(parentObjectID) + .setAcls(acls) .setFileName(fileName) .setFile(isFile); @@ -837,12 
+852,12 @@ public OmKeyInfo copyObject() { keyLocationVersion.getLocationList(), keyLocationVersion.isMultipartKey()))); - acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); + if (getMetadata() != null) { + getMetadata().forEach(builder::addMetadata); + } - if (metadata != null) { - metadata.forEach((k, v) -> builder.addMetadata(k, v)); + if (getTags() != null) { + getTags().forEach(builder::addTag); } if (fileChecksum != null) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java index 646cb421e434..bbf1a1bdae53 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java @@ -24,8 +24,15 @@ public class OmMultipartCommitUploadPartInfo { private final String partName; - public OmMultipartCommitUploadPartInfo(String name) { - this.partName = name; + private final String eTag; + + public OmMultipartCommitUploadPartInfo(String partName, String eTag) { + this.partName = partName; + this.eTag = eTag; + } + + public String getETag() { + return eTag; } public String getPartName() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java index 4f57e075bd70..61402ee28e6c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.hdds.utils.db.CopyObject; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartKeyInfo; @@ -37,7 +38,7 @@ * This class represents multipart upload information for a key, which holds * upload part information of the key. */ -public final class OmMultipartKeyInfo extends WithObjectID { +public final class OmMultipartKeyInfo extends WithObjectID implements CopyObject { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(MultipartKeyInfo.getDefaultInstance()), OmMultipartKeyInfo::getFromProto, @@ -155,37 +156,33 @@ public PartKeyInfo lastEntry() { * multiKey1 | 1026 | 1025 | * ------------------------------------------| */ - private long parentID; + private final long parentID; /** * Construct OmMultipartKeyInfo object which holds multipart upload * information for a key. 
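OmMultipartCommitUploadPartInfo now pairs the legacy part name with the S3 ETag, both fixed at construction time. A minimal sketch with hypothetical values:

    OmMultipartCommitUploadPartInfo commitInfo = new OmMultipartCommitUploadPartInfo(
        "/vol1/bucket1/key1/1",                  // partName, kept for backward compatibility
        "5d41402abc4b2a76b9719d911017c592");     // eTag reported back to the S3 client

    String eTag = commitInfo.getETag();
    String partName = commitInfo.getPartName();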
*/ - @SuppressWarnings("parameternumber") - private OmMultipartKeyInfo(String id, long creationTime, - ReplicationConfig replicationConfig, - PartKeyInfoMap sortedMap, long objectID, long updateID, - long parentObjId) { - this.uploadID = id; - this.creationTime = creationTime; - this.replicationConfig = replicationConfig; - this.partKeyInfoMap = sortedMap; - this.objectID = objectID; - this.updateID = updateID; - this.parentID = parentObjId; + private OmMultipartKeyInfo(Builder b) { + super(b); + this.uploadID = b.uploadID; + this.creationTime = b.creationTime; + this.replicationConfig = b.replicationConfig; + this.partKeyInfoMap = new PartKeyInfoMap(b.partKeyInfoList); + this.parentID = b.parentID; } - /** - * Construct OmMultipartKeyInfo object which holds multipart upload - * information for a key. - */ - @SuppressWarnings("parameternumber") - private OmMultipartKeyInfo(String id, long creationTime, - ReplicationConfig replicationConfig, - SortedMap list, long objectID, long updateID, - long parentObjId) { - this(id, creationTime, replicationConfig, new PartKeyInfoMap(list), - objectID, updateID, parentObjId); + /** Copy constructor. */ + private OmMultipartKeyInfo(OmMultipartKeyInfo b) { + this.uploadID = b.uploadID; + this.creationTime = b.creationTime; + this.replicationConfig = b.replicationConfig; + // PartKeyInfoMap is an immutable data structure. Whenever a PartKeyInfo + // is added, it returns a new shallow copy of the PartKeyInfoMap Object + // so here we can directly pass in partKeyInfoMap + this.partKeyInfoMap = b.partKeyInfoMap; + setObjectID(b.getObjectID()); + setUpdateID(b.getUpdateID()); + this.parentID = b.parentID; } /** @@ -228,13 +225,11 @@ public ReplicationConfig getReplicationConfig() { /** * Builder of OmMultipartKeyInfo. 
*/ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String uploadID; private long creationTime; private ReplicationConfig replicationConfig; - private TreeMap partKeyInfoList; - private long objectID; - private long updateID; + private final TreeMap partKeyInfoList; private long parentID; public Builder() { @@ -271,12 +266,12 @@ public Builder addPartKeyInfoList(int partNum, PartKeyInfo partKeyInfo) { } public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -286,8 +281,7 @@ public Builder setParentID(long parentObjId) { } public OmMultipartKeyInfo build() { - return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig, - partKeyInfoList, objectID, updateID, parentID); + return new OmMultipartKeyInfo(this); } } @@ -308,10 +302,15 @@ public static OmMultipartKeyInfo getFromProto( multipartKeyInfo.getEcReplicationConfig() ); - return new OmMultipartKeyInfo(multipartKeyInfo.getUploadID(), - multipartKeyInfo.getCreationTime(), replicationConfig, - list, multipartKeyInfo.getObjectID(), - multipartKeyInfo.getUpdateID(), multipartKeyInfo.getParentID()); + return new Builder() + .setUploadID(multipartKeyInfo.getUploadID()) + .setCreationTime(multipartKeyInfo.getCreationTime()) + .setReplicationConfig(replicationConfig) + .setPartKeyInfoList(list) + .setObjectID(multipartKeyInfo.getObjectID()) + .setUpdateID(multipartKeyInfo.getUpdateID()) + .setParentID(multipartKeyInfo.getParentID()) + .build(); } /** @@ -323,8 +322,8 @@ public MultipartKeyInfo getProto() { .setUploadID(uploadID) .setCreationTime(creationTime) .setType(replicationConfig.getReplicationType()) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setParentID(parentID); if (replicationConfig instanceof ECReplicationConfig) { @@ -357,12 +356,9 @@ public int hashCode() { return uploadID.hashCode(); } + @Override public OmMultipartKeyInfo copyObject() { - // PartKeyInfoMap is an immutable data structure. 
Whenever a PartKeyInfo - // is added, it returns a new shallow copy of the PartKeyInfoMap Object - // so here we can directly pass in partKeyInfoMap - return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig, - partKeyInfoMap, objectID, updateID, parentID); + return new OmMultipartKeyInfo(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java index 63e6353c1850..ff39661d01b5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java @@ -56,8 +56,9 @@ public Map getMultipartMap() { */ public List getPartsList() { List partList = new ArrayList<>(); - multipartMap.forEach((partNumber, partName) -> partList.add(Part - .newBuilder().setPartName(partName).setPartNumber(partNumber).build())); + multipartMap.forEach((partNumber, eTag) -> partList.add(Part + // set partName equal to eTag for back compatibility (partName is a required property) + .newBuilder().setPartName(eTag).setETag(eTag).setPartNumber(partNumber).build())); return partList; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java index fbf519c22682..0ba0e26acda2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java @@ -79,6 +79,7 @@ public void addPartList(List partInfos) { public void addProtoPartList(List partInfos) { partInfos.forEach(partInfo -> partInfoList.add(new OmPartInfo( partInfo.getPartNumber(), partInfo.getPartName(), - partInfo.getModificationTime(), partInfo.getSize()))); + partInfo.getModificationTime(), partInfo.getSize(), + partInfo.getETag()))); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java index 2d753a5caa5a..35d97cd4ffdc 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java @@ -23,17 +23,20 @@ /** * Class that defines information about each part of a multipart upload key. 
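The part listing path now carries the ETag end to end: addProtoPartList copies it into OmPartInfo, whose constructor gained a fifth argument. A sketch with hypothetical values; the ETag may be null for parts written before this change:

    OmPartInfo part = new OmPartInfo(
        1,                                   // part number
        "/vol1/bucket1/key1/1",              // part name
        System.currentTimeMillis(),          // modification time
        4L * 1024 * 1024,                    // size in bytes
        "etag-of-part-1");                   // ETag; getProto() simply omits it when null

    PartInfo proto = part.getProto();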
*/ -public class OmPartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; +public final class OmPartInfo { + private final int partNumber; + private final String partName; + private final long modificationTime; + private final long size; + private final String eTag; - public OmPartInfo(int number, String name, long time, long size) { + public OmPartInfo(int number, String name, long time, long size, + String eTag) { this.partNumber = number; this.partName = name; this.modificationTime = time; this.size = size; + this.eTag = eTag; } public int getPartNumber() { @@ -52,9 +55,19 @@ public long getSize() { return size; } + public String getETag() { + return eTag; + } + public PartInfo getProto() { - return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName) - .setModificationTime(modificationTime) - .setSize(size).build(); + PartInfo.Builder builder = PartInfo.newBuilder() + .setPartNumber(partNumber) + .setPartName(partName) + .setModificationTime(modificationTime) + .setSize(size); + if (eTag != null) { + builder.setETag(eTag); + } + return builder.build(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index c5c8f5ca8e2b..499b4878362d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -18,13 +18,13 @@ package org.apache.hadoop.ozone.om.helpers; import java.util.ArrayList; -import java.util.BitSet; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import com.google.common.collect.ImmutableList; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.CopyObject; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; @@ -60,7 +60,7 @@ public static Codec getCodec() { private long quotaInBytes; private long quotaInNamespace; private long usedNamespace; - private List acls; + private final CopyOnWriteArrayList acls; /** * Reference count on this Ozone volume. * @@ -75,40 +75,18 @@ public static Codec getCodec() { */ private long refCount; - /** - * Private constructor, constructed via builder. - * @param adminName - Administrator's name. - * @param ownerName - Volume owner's name - * @param volume - volume name - * @param quotaInBytes - Volume Quota in bytes. - * @param quotaInNamespace - Volume Quota in counts. - * @param usedNamespace - Volume Namespace Quota Usage in counts. - * @param metadata - metadata map for custom key/value data. - * @param acls - list of volume acls. - * @param creationTime - Volume creation time. - * @param objectID - ID of this object. - * @param updateID - A sequence number that denotes the last update on this - * object. This is a monotonically increasing number. 
- */ - @SuppressWarnings({"checkstyle:ParameterNumber", - "This is invoked from a builder."}) - private OmVolumeArgs(String adminName, String ownerName, String volume, - long quotaInBytes, long quotaInNamespace, long usedNamespace, - Map metadata, List acls, long creationTime, - long modificationTime, long objectID, long updateID, long refCount) { - this.adminName = adminName; - this.ownerName = ownerName; - this.volume = volume; - this.quotaInBytes = quotaInBytes; - this.quotaInNamespace = quotaInNamespace; - this.usedNamespace = usedNamespace; - this.metadata = metadata; - this.acls = acls; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.objectID = objectID; - this.updateID = updateID; - this.refCount = refCount; + private OmVolumeArgs(Builder b) { + super(b); + this.adminName = b.adminName; + this.ownerName = b.ownerName; + this.volume = b.volume; + this.quotaInBytes = b.quotaInBytes; + this.quotaInNamespace = b.quotaInNamespace; + this.usedNamespace = b.usedNamespace; + this.acls = new CopyOnWriteArrayList<>(b.acls); + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.refCount = b.refCount; } public long getRefCount() { @@ -221,7 +199,7 @@ public long getQuotaInNamespace() { } public List getAcls() { - return acls; + return ImmutableList.copyOf(acls); } public List getDefaultAcls() { @@ -286,18 +264,18 @@ public boolean equals(Object o) { return false; } OmVolumeArgs that = (OmVolumeArgs) o; - return Objects.equals(this.objectID, that.objectID); + return Objects.equals(this.getObjectID(), that.getObjectID()); } @Override public int hashCode() { - return Objects.hash(this.objectID); + return Objects.hash(getObjectID()); } /** * Builder for OmVolumeArgs. */ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String adminName; private String ownerName; private String volume; @@ -306,30 +284,18 @@ public static class Builder { private long quotaInBytes; private long quotaInNamespace; private long usedNamespace; - private Map metadata; - private List acls; - private long objectID; - private long updateID; + private final List acls; private long refCount; - /** - * Sets the Object ID for this Object. - * Object ID are unique and immutable identifier for each object in the - * System. - * @param id - long - */ + @Override public Builder setObjectID(long id) { - this.objectID = id; + super.setObjectID(id); return this; } - /** - * Sets the update ID for this Object. Update IDs are monotonically - * increasing values which are updated each time there is an update. - * @param id - long - */ + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -337,8 +303,11 @@ public Builder setUpdateID(long id) { * Constructs a builder. */ public Builder() { - metadata = new HashMap<>(); - acls = new ArrayList(); + this(new ArrayList<>()); + } + + private Builder(List acls) { + this.acls = acls; quotaInBytes = OzoneConsts.QUOTA_RESET; quotaInNamespace = OzoneConsts.QUOTA_RESET; } @@ -383,15 +352,15 @@ public Builder setUsedNamespace(long namespaceUsage) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); // overwrite if present. 
+ super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map additionalMetaData) { - if (additionalMetaData != null) { - metadata.putAll(additionalMetaData); - } + super.addAllMetadata(additionalMetaData); return this; } @@ -400,23 +369,18 @@ public Builder addOzoneAcls(OzoneAcl acl) { return this; } - public void setRefCount(long refCount) { + public Builder setRefCount(long refCount) { Preconditions.checkState(refCount >= 0L, "refCount should not be negative"); this.refCount = refCount; + return this; } - /** - * Constructs a CreateVolumeArgument. - * @return CreateVolumeArgs. - */ public OmVolumeArgs build() { Preconditions.checkNotNull(adminName); Preconditions.checkNotNull(ownerName); Preconditions.checkNotNull(volume); - return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInNamespace, usedNamespace, metadata, acls, creationTime, - modificationTime, objectID, updateID, refCount); + return new OmVolumeArgs(this); } } @@ -430,34 +394,32 @@ public VolumeInfo getProtobuf() { .setQuotaInBytes(quotaInBytes) .setQuotaInNamespace(quotaInNamespace) .setUsedNamespace(usedNamespace) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) .addAllVolumeAcls(aclList) .setCreationTime( creationTime == 0 ? System.currentTimeMillis() : creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setRefCount(refCount) .build(); } public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) { - List acls = OzoneAclUtil.fromProtobuf( - volInfo.getVolumeAclsList()); - return new OmVolumeArgs( - volInfo.getAdminName(), - volInfo.getOwnerName(), - volInfo.getVolume(), - volInfo.getQuotaInBytes(), - volInfo.getQuotaInNamespace(), - volInfo.getUsedNamespace(), - KeyValueUtil.getFromProtobuf(volInfo.getMetadataList()), - acls, - volInfo.getCreationTime(), - volInfo.getModificationTime(), - volInfo.getObjectID(), - volInfo.getUpdateID(), - volInfo.getRefCount()); + return new Builder(OzoneAclUtil.fromProtobuf(volInfo.getVolumeAclsList())) + .setAdminName(volInfo.getAdminName()) + .setOwnerName(volInfo.getOwnerName()) + .setVolume(volInfo.getVolume()) + .setQuotaInBytes(volInfo.getQuotaInBytes()) + .setQuotaInNamespace(volInfo.getQuotaInNamespace()) + .setUsedNamespace(volInfo.getUsedNamespace()) + .addAllMetadata(KeyValueUtil.getFromProtobuf(volInfo.getMetadataList())) + .setCreationTime(volInfo.getCreationTime()) + .setModificationTime(volInfo.getModificationTime()) + .setObjectID(volInfo.getObjectID()) + .setUpdateID(volInfo.getUpdateID()) + .setRefCount(volInfo.getRefCount()) + .build(); } @Override @@ -475,19 +437,19 @@ public String getObjectInfo() { @Override public OmVolumeArgs copyObject() { - Map cloneMetadata = new HashMap<>(); - if (metadata != null) { - metadata.forEach((k, v) -> cloneMetadata.put(k, v)); - } - - List cloneAcls = new ArrayList(acls.size()); - - acls.forEach(acl -> cloneAcls.add(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); - - return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInNamespace, usedNamespace, cloneMetadata, cloneAcls, - creationTime, modificationTime, objectID, updateID, refCount); + return new Builder(acls) + .setAdminName(adminName) + .setOwnerName(ownerName) + .setVolume(volume) + .setQuotaInBytes(quotaInBytes) + .setQuotaInNamespace(quotaInNamespace) + 
.setUsedNamespace(usedNamespace) + .addAllMetadata(getMetadata()) + .setCreationTime(creationTime) + .setModificationTime(modificationTime) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) + .setRefCount(refCount) + .build(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java index 134675cdce84..517f0c14ce09 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -27,17 +27,17 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; +import java.util.stream.Stream; + import org.apache.hadoop.security.UserGroupInformation; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; /** * Helper class for ozone acls operations. @@ -60,11 +60,11 @@ public static List getAclList(String userName, List listOfAcls = new ArrayList<>(); // User ACL. - listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS)); + listOfAcls.add(new OzoneAcl(USER, userName, ACCESS, userRights)); if (userGroups != null) { // Group ACLs of the User. Arrays.asList(userGroups).forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, groupRights, ACCESS))); + new OzoneAcl(GROUP, group, ACCESS, groupRights))); } return listOfAcls; } @@ -91,23 +91,22 @@ public static List filterAclList(String identityName, private static boolean checkAccessInAcl(OzoneAcl a, UserGroupInformation ugi, ACLType aclToCheck) { - BitSet rights = a.getAclBitSet(); switch (a.getType()) { case USER: if (a.getName().equals(ugi.getShortUserName())) { - return checkIfAclBitIsSet(aclToCheck, rights); + return a.checkAccess(aclToCheck); } break; case GROUP: for (String grp : ugi.getGroupNames()) { if (a.getName().equals(grp)) { - return checkIfAclBitIsSet(aclToCheck, rights); + return a.checkAccess(aclToCheck); } } break; default: - return checkIfAclBitIsSet(aclToCheck, rights); + return a.checkAccess(aclToCheck); } return false; } @@ -137,56 +136,30 @@ public static boolean checkAclRights(List acls, } /** - * Helper function to check if bit for given acl is set. - * @param acl - * @param bitset - * @return True of acl bit is set else false. - * */ - public static boolean checkIfAclBitIsSet(IAccessAuthorizer.ACLType acl, - BitSet bitset) { - if (bitset == null) { - return false; - } - - return ((bitset.get(acl.ordinal()) - || bitset.get(ALL.ordinal())) - && !bitset.get(NONE.ordinal())); - } - - /** - * Helper function to inherit default ACL as access ACL for child object. - * 1. deep copy of OzoneAcl to avoid unexpected parent default ACL change - * 2. merge inherited access ACL with existing access ACL via - * OzoneUtils.addAcl(). - * @param acls - * @param parentAcls - * @return true if acls inherited DEFAULT acls from parentAcls successfully, - * false otherwise. 
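getAclList now constructs OzoneAcl with the reordered (type, name, scope, rights) arguments, and access checks delegate to OzoneAcl#checkAccess instead of BitSet arithmetic. A sketch, assuming the pre-existing getAclList(String, String[], ACLType, ACLType) signature whose parameter types are elided in the listing above:

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    // ACCESS-scope ACLs for a new object: full rights for the user, READ for its groups.
    List<OzoneAcl> acls = OzoneAclUtil.getAclList(
        ugi.getShortUserName(), ugi.getGroupNames(),
        IAccessAuthorizer.ACLType.ALL, IAccessAuthorizer.ACLType.READ);

    // The user entry is added first; rights checks now go through checkAccess.
    boolean canWrite = acls.get(0).checkAccess(IAccessAuthorizer.ACLType.WRITE);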
+ * Helper function to inherit default ACL with given {@code scope} for child object. + * @param acls child object ACL list + * @param parentAcls parent object ACL list + * @param scope scope applied to inherited ACL + * @return true if any ACL was inherited from parent, false otherwise */ public static boolean inheritDefaultAcls(List acls, - List parentAcls) { - List inheritedAcls = null; + List parentAcls, OzoneAcl.AclScope scope) { if (parentAcls != null && !parentAcls.isEmpty()) { - inheritedAcls = parentAcls.stream() - .filter(a -> a.getAclScope() == DEFAULT) - .map(acl -> new OzoneAcl(acl.getType(), acl.getName(), - acl.getAclBitSet(), ACCESS)) - .collect(Collectors.toList()); - } - if (inheritedAcls != null && !inheritedAcls.isEmpty()) { - inheritedAcls.stream().forEach(acl -> addAcl(acls, acl)); - return true; + Stream aclStream = parentAcls.stream() + .filter(a -> a.getAclScope() == DEFAULT); + + if (scope != DEFAULT) { + aclStream = aclStream.map(acl -> acl.withScope(scope)); + } + + List inheritedAcls = aclStream.collect(Collectors.toList()); + if (!inheritedAcls.isEmpty()) { + inheritedAcls.forEach(acl -> addAcl(acls, acl)); + return true; + } } - return false; - } - /** - * Helper function to convert the scope of ACLs to DEFAULT. - * This method is called in ACL inheritance scenarios. - * @param acls - */ - public static void toDefaultScope(List acls) { - acls.forEach(a -> a.setAclScope(DEFAULT)); + return false; } /** @@ -217,8 +190,6 @@ public static List toProtobuf(List protoAcls) { /** * Add an OzoneAcl to existing list of OzoneAcls. - * @param existingAcls - * @param acl * @return true if current OzoneAcls are changed, false otherwise. */ public static boolean addAcl(List existingAcls, OzoneAcl acl) { @@ -226,17 +197,17 @@ public static boolean addAcl(List existingAcls, OzoneAcl acl) { return false; } - for (OzoneAcl a: existingAcls) { + for (int i = 0; i < existingAcls.size(); i++) { + final OzoneAcl a = existingAcls.get(i); if (a.getName().equals(acl.getName()) && a.getType().equals(acl.getType()) && a.getAclScope().equals(acl.getAclScope())) { - BitSet current = a.getAclBitSet(); - BitSet original = (BitSet) current.clone(); - current.or(acl.getAclBitSet()); - if (current.equals(original)) { - return false; + final OzoneAcl updated = a.add(acl); + final boolean changed = !Objects.equals(updated, a); + if (changed) { + existingAcls.set(i, updated); } - return true; + return changed; } } @@ -246,8 +217,6 @@ public static boolean addAcl(List existingAcls, OzoneAcl acl) { /** * remove OzoneAcl from existing list of OzoneAcls. - * @param existingAcls - * @param acl * @return true if current OzoneAcls are changed, false otherwise. 
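inheritDefaultAcls now takes the target scope explicitly, replacing the separate toDefaultScope pass: the parent's DEFAULT ACLs are re-scoped via OzoneAcl#withScope (unless DEFAULT is requested) and merged through addAcl. A sketch; the helper name and ACL lists are hypothetical:

    static boolean applyParentDefaults(List<OzoneAcl> childAcls, List<OzoneAcl> parentAcls) {
      // For a child data object (key/file) the inherited ACLs become ACCESS ACLs.
      // A child directory/bucket would instead pass OzoneAcl.AclScope.DEFAULT so the
      // ACLs keep propagating to its own children.
      return OzoneAclUtil.inheritDefaultAcls(childAcls, parentAcls, OzoneAcl.AclScope.ACCESS);
    }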
*/ public static boolean removeAcl(List existingAcls, OzoneAcl acl) { @@ -255,22 +224,19 @@ public static boolean removeAcl(List existingAcls, OzoneAcl acl) { return false; } - for (OzoneAcl a: existingAcls) { + for (int i = 0; i < existingAcls.size(); i++) { + final OzoneAcl a = existingAcls.get(i); if (a.getName().equals(acl.getName()) && a.getType().equals(acl.getType()) && a.getAclScope().equals(acl.getAclScope())) { - BitSet current = a.getAclBitSet(); - BitSet original = (BitSet) current.clone(); - current.andNot(acl.getAclBitSet()); - - if (current.equals(original)) { - return false; + final OzoneAcl updated = a.remove(acl); + final boolean changed = !Objects.equals(updated, a); + if (updated.isEmpty()) { + existingAcls.remove(i); + } else if (changed) { + existingAcls.set(i, updated); } - - if (current.isEmpty()) { - existingAcls.remove(a); - } - return true; + return changed; } } return false; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java index cb1ed0976a08..20c145bd0c06 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java @@ -101,9 +101,7 @@ public S3Secret getProtobuf() { @Override public String toString() { - return "awsAccessKey=" + kerberosID + "\nawsSecret=" + awsSecret + - "\nisDeleted=" + isDeleted + "\ntransactionLogIndex=" + - transactionLogIndex; + return "awsAccessKey=" + kerberosID + "\nawsSecret=" + awsSecret; } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 56103ccb3ab8..47a48c37e8e0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -49,8 +49,8 @@ * This class is used for storing info related to Snapshots. * * Each snapshot created has an associated SnapshotInfo entry - * containing the snapshotid, snapshot path, - * snapshot checkpoint directory, previous snapshotid + * containing the snapshotId, snapshot path, + * snapshot checkpoint directory, previous snapshotId * for the snapshot path & global amongst other necessary fields. */ public final class SnapshotInfo implements Auditable, CopyObject { @@ -125,65 +125,26 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) { private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; - /** - * Private constructor, constructed via builder. - * @param snapshotId - Snapshot UUID. - * @param name - snapshot name. - * @param volumeName - volume name. - * @param bucketName - bucket name. - * @param snapshotStatus - status: SNAPSHOT_ACTIVE, SNAPSHOT_DELETED - * @param creationTime - Snapshot creation time. - * @param deletionTime - Snapshot deletion time. - * @param pathPreviousSnapshotId - Snapshot path previous snapshot id. - * @param globalPreviousSnapshotId - Snapshot global previous snapshot id. - * @param snapshotPath - Snapshot path, bucket .snapshot path. - * @param checkpointDir - Snapshot checkpoint directory. - * @param dbTxSequenceNumber - RDB latest transaction sequence number. - * @param deepCleaned - To be deep cleaned status for snapshot. - * @param referencedSize - Snapshot referenced size. 
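addAcl and removeAcl now treat OzoneAcl as immutable: a matching entry is combined via OzoneAcl#add or OzoneAcl#remove, the list slot is replaced (or dropped once no rights remain), and the return value reports whether the list actually changed. A round-trip sketch, assuming the reordered OzoneAcl constructor used in getAclList above:

    List<OzoneAcl> acls = new ArrayList<>();
    OzoneAcl read = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "alice",
        OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.READ);
    OzoneAcl write = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "alice",
        OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE);

    OzoneAclUtil.addAcl(acls, read);      // true: new entry for user "alice"
    OzoneAclUtil.addAcl(acls, write);     // true: entry replaced by a READ+WRITE instance
    OzoneAclUtil.addAcl(acls, read);      // false: rights already present, list untouched
    OzoneAclUtil.removeAcl(acls, write);  // true: entry shrinks back to READ only
    OzoneAclUtil.removeAcl(acls, read);   // true: last right gone, entry removed from the list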
- * @param referencedReplicatedSize - Snapshot referenced size w/ replication. - * @param exclusiveSize - Snapshot exclusive size. - * @param exclusiveReplicatedSize - Snapshot exclusive size w/ replication. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private SnapshotInfo(UUID snapshotId, - String name, - String volumeName, - String bucketName, - SnapshotStatus snapshotStatus, - long creationTime, - long deletionTime, - UUID pathPreviousSnapshotId, - UUID globalPreviousSnapshotId, - String snapshotPath, - String checkpointDir, - long dbTxSequenceNumber, - boolean deepCleaned, - boolean sstFiltered, - long referencedSize, - long referencedReplicatedSize, - long exclusiveSize, - long exclusiveReplicatedSize, - boolean deepCleanedDeletedDir) { - this.snapshotId = snapshotId; - this.name = name; - this.volumeName = volumeName; - this.bucketName = bucketName; - this.snapshotStatus = snapshotStatus; - this.creationTime = creationTime; - this.deletionTime = deletionTime; - this.pathPreviousSnapshotId = pathPreviousSnapshotId; - this.globalPreviousSnapshotId = globalPreviousSnapshotId; - this.snapshotPath = snapshotPath; - this.checkpointDir = checkpointDir; - this.dbTxSequenceNumber = dbTxSequenceNumber; - this.deepClean = deepCleaned; - this.sstFiltered = sstFiltered; - this.referencedSize = referencedSize; - this.referencedReplicatedSize = referencedReplicatedSize; - this.exclusiveSize = exclusiveSize; - this.exclusiveReplicatedSize = exclusiveReplicatedSize; - this.deepCleanedDeletedDir = deepCleanedDeletedDir; + private SnapshotInfo(Builder b) { + this.snapshotId = b.snapshotId; + this.name = b.name; + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.snapshotStatus = b.snapshotStatus; + this.creationTime = b.creationTime; + this.deletionTime = b.deletionTime; + this.pathPreviousSnapshotId = b.pathPreviousSnapshotId; + this.globalPreviousSnapshotId = b.globalPreviousSnapshotId; + this.snapshotPath = b.snapshotPath; + this.checkpointDir = b.checkpointDir; + this.dbTxSequenceNumber = b.dbTxSequenceNumber; + this.deepClean = b.deepClean; + this.sstFiltered = b.sstFiltered; + this.referencedSize = b.referencedSize; + this.referencedReplicatedSize = b.referencedReplicatedSize; + this.exclusiveSize = b.exclusiveSize; + this.exclusiveReplicatedSize = b.exclusiveReplicatedSize; + this.deepCleanedDeletedDir = b.deepCleanedDeletedDir; } public void setName(String name) { @@ -338,66 +299,79 @@ public Builder() { this.snapshotStatus = SnapshotStatus.DEFAULT; } + /** @param snapshotId - Snapshot UUID. */ public Builder setSnapshotId(UUID snapshotId) { this.snapshotId = snapshotId; return this; } + /** @param name - snapshot name. */ public Builder setName(String name) { this.name = name; return this; } + /** @param volumeName - volume name. */ public Builder setVolumeName(String volumeName) { this.volumeName = volumeName; return this; } + /** @param bucketName - bucket name. */ public Builder setBucketName(String bucketName) { this.bucketName = bucketName; return this; } + /** @param snapshotStatus - status: SNAPSHOT_ACTIVE, SNAPSHOT_DELETED */ public Builder setSnapshotStatus(SnapshotStatus snapshotStatus) { this.snapshotStatus = snapshotStatus; return this; } + /** @param crTime - Snapshot creation time. */ public Builder setCreationTime(long crTime) { this.creationTime = crTime; return this; } + /** @param delTime - Snapshot deletion time. 
*/ public Builder setDeletionTime(long delTime) { this.deletionTime = delTime; return this; } + /** @param pathPreviousSnapshotId - Snapshot path previous snapshot id. */ public Builder setPathPreviousSnapshotId(UUID pathPreviousSnapshotId) { this.pathPreviousSnapshotId = pathPreviousSnapshotId; return this; } + /** @param globalPreviousSnapshotId - Snapshot global previous snapshot id. */ public Builder setGlobalPreviousSnapshotId(UUID globalPreviousSnapshotId) { this.globalPreviousSnapshotId = globalPreviousSnapshotId; return this; } + /** @param snapshotPath - Snapshot path, bucket .snapshot path. */ public Builder setSnapshotPath(String snapshotPath) { this.snapshotPath = snapshotPath; return this; } + /** @param checkpointDir - Snapshot checkpoint directory. */ public Builder setCheckpointDir(String checkpointDir) { this.checkpointDir = checkpointDir; return this; } + /** @param dbTxSequenceNumber - RDB latest transaction sequence number. */ public Builder setDbTxSequenceNumber(long dbTxSequenceNumber) { this.dbTxSequenceNumber = dbTxSequenceNumber; return this; } + /** @param deepClean - To be deep cleaned status for snapshot. */ public Builder setDeepClean(boolean deepClean) { this.deepClean = deepClean; return this; @@ -408,21 +382,25 @@ public Builder setSstFiltered(boolean sstFiltered) { return this; } + /** @param referencedSize - Snapshot referenced size. */ public Builder setReferencedSize(long referencedSize) { this.referencedSize = referencedSize; return this; } + /** @param referencedReplicatedSize - Snapshot referenced size w/ replication. */ public Builder setReferencedReplicatedSize(long referencedReplicatedSize) { this.referencedReplicatedSize = referencedReplicatedSize; return this; } + /** @param exclusiveSize - Snapshot exclusive size. */ public Builder setExclusiveSize(long exclusiveSize) { this.exclusiveSize = exclusiveSize; return this; } + /** @param exclusiveReplicatedSize - Snapshot exclusive size w/ replication. 
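SnapshotInfo follows the same pattern: construction goes through the Builder, whose setters now carry the parameter documentation, and build() only enforces that a name was supplied. A minimal sketch with hypothetical identifiers:

    SnapshotInfo snapshotInfo = new SnapshotInfo.Builder()
        .setSnapshotId(UUID.randomUUID())
        .setName("snap-2024-01")
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setSnapshotStatus(SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE)
        .setCreationTime(System.currentTimeMillis())
        .setDeepClean(false)
        .build();       // the name is the only field checked by build()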
*/ public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) { this.exclusiveReplicatedSize = exclusiveReplicatedSize; return this; @@ -435,27 +413,7 @@ public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { public SnapshotInfo build() { Preconditions.checkNotNull(name); - return new SnapshotInfo( - snapshotId, - name, - volumeName, - bucketName, - snapshotStatus, - creationTime, - deletionTime, - pathPreviousSnapshotId, - globalPreviousSnapshotId, - snapshotPath, - checkpointDir, - dbTxSequenceNumber, - deepClean, - sstFiltered, - referencedSize, - referencedReplicatedSize, - exclusiveSize, - exclusiveReplicatedSize, - deepCleanedDeletedDir - ); + return new SnapshotInfo(this); } } @@ -756,4 +714,29 @@ public SnapshotInfo copyObject() { .setDeepCleanedDeletedDir(deepCleanedDeletedDir) .build(); } + + @Override + public String toString() { + return "SnapshotInfo{" + + "snapshotId: '" + snapshotId + '\'' + + ", name: '" + name + '\'' + + ", volumeName: '" + volumeName + '\'' + + ", bucketName: '" + bucketName + '\'' + + ", snapshotStatus: '" + snapshotStatus + '\'' + + ", creationTime: '" + creationTime + '\'' + + ", deletionTime: '" + deletionTime + '\'' + + ", pathPreviousSnapshotId: '" + pathPreviousSnapshotId + '\'' + + ", globalPreviousSnapshotId: '" + globalPreviousSnapshotId + '\'' + + ", snapshotPath: '" + snapshotPath + '\'' + + ", checkpointDir: '" + checkpointDir + '\'' + + ", dbTxSequenceNumber: '" + dbTxSequenceNumber + '\'' + + ", deepClean: '" + deepClean + '\'' + + ", sstFiltered: '" + sstFiltered + '\'' + + ", referencedSize: '" + referencedSize + '\'' + + ", referencedReplicatedSize: '" + referencedReplicatedSize + '\'' + + ", exclusiveSize: '" + exclusiveSize + '\'' + + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + + ", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + + '}'; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java deleted file mode 100644 index 6fc7c8fcc535..000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import com.google.common.base.Preconditions; - -import java.util.HashMap; -import java.util.Map; - -/** - * A class that encapsulates the createVolume Args. - */ -public final class VolumeArgs { - private final String adminName; - private final String ownerName; - private final String volume; - private final long quotaInBytes; - private final Map extendedAttributes; - - /** - * Private constructor, constructed via builder. - * - * @param adminName - Administrator name. - * @param ownerName - Volume owner's name - * @param volume - volume name - * @param quotaInBytes - Volume Quota in bytes. - * @param keyValueMap - keyValue map. - */ - private VolumeArgs(String adminName, String ownerName, String volume, - long quotaInBytes, Map keyValueMap) { - this.adminName = adminName; - this.ownerName = ownerName; - this.volume = volume; - this.quotaInBytes = quotaInBytes; - this.extendedAttributes = keyValueMap; - } - - /** - * Returns the Admin Name. - * - * @return String. - */ - public String getAdminName() { - return adminName; - } - - /** - * Returns the owner Name. - * - * @return String - */ - public String getOwnerName() { - return ownerName; - } - - /** - * Returns the volume Name. - * - * @return String - */ - public String getVolume() { - return volume; - } - - /** - * Returns Quota in Bytes. - * - * @return long, Quota in bytes. - */ - public long getQuotaInBytes() { - return quotaInBytes; - } - - public Map getExtendedAttributes() { - return extendedAttributes; - } - - static class Builder { - private String adminName; - private String ownerName; - private String volume; - private long quotaInBytes; - private Map extendedAttributes; - - /** - * Constructs a builder. - */ - Builder() { - extendedAttributes = new HashMap<>(); - } - - public void setAdminName(String adminName) { - this.adminName = adminName; - } - - public void setOwnerName(String ownerName) { - this.ownerName = ownerName; - } - - public void setVolume(String volume) { - this.volume = volume; - } - - public void setQuotaInBytes(long quotaInBytes) { - this.quotaInBytes = quotaInBytes; - } - - public void addMetadata(String key, String value) { - extendedAttributes.put(key, value); // overwrite if present. - } - - /** - * Constructs a CreateVolumeArgument. - * - * @return CreateVolumeArgs. 
- */ - public VolumeArgs build() { - Preconditions.checkNotNull(adminName); - Preconditions.checkNotNull(ownerName); - Preconditions.checkNotNull(volume); - return new VolumeArgs(adminName, ownerName, volume, quotaInBytes, - extendedAttributes); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java index 5c49a15a12bf..0993e9a4cdd8 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java @@ -17,29 +17,71 @@ */ package org.apache.hadoop.ozone.om.helpers; -import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; /** * Mixin class to handle custom metadata. */ -public class WithMetadata { +public abstract class WithMetadata { - @SuppressWarnings("visibilitymodifier") - protected Map metadata = new HashMap<>(); + private Map metadata; + + protected WithMetadata() { + metadata = new ConcurrentHashMap<>(); + } + + protected WithMetadata(Builder b) { + metadata = b.metadata; + } /** * Custom key value metadata. */ - public Map getMetadata() { + public final Map getMetadata() { return metadata; } /** * Set custom key value metadata. */ - public void setMetadata(Map metadata) { + public final void setMetadata(Map metadata) { this.metadata = metadata; } + /** Builder for {@link WithMetadata}. */ + public static class Builder { + private final Map metadata; + + protected Builder() { + metadata = new ConcurrentHashMap<>(); + } + + protected Builder(WithObjectID obj) { + metadata = new ConcurrentHashMap<>(obj.getMetadata()); + } + + public Builder addMetadata(String key, String value) { + metadata.put(key, value); + return this; + } + + public Builder addAllMetadata(Map additionalMetadata) { + if (additionalMetadata != null) { + metadata.putAll(additionalMetadata); + } + return this; + } + + public Builder setMetadata(Map map) { + metadata.clear(); + addAllMetadata(map); + return this; + } + + protected Map getMetadata() { + return metadata; + } + } + } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java index 0ea1a1c0e6a7..af9508196260 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java @@ -22,34 +22,34 @@ /** * Mixin class to handle ObjectID and UpdateID. */ -public class WithObjectID extends WithMetadata { +public abstract class WithObjectID extends WithMetadata { + + private long objectID; + private long updateID; + + protected WithObjectID() { + super(); + } + + protected WithObjectID(Builder b) { + super(b); + objectID = b.objectID; + updateID = b.updateID; + } /** * ObjectIDs are unique and immutable identifier for each object in the * System. */ - @SuppressWarnings("visibilitymodifier") - protected long objectID; - /** - * UpdateIDs are monotonically increasing values which are updated - * each time there is an update. - */ - @SuppressWarnings("visibilitymodifier") - protected long updateID; - - /** - * Returns objectID. - * @return long - */ - public long getObjectID() { + public final long getObjectID() { return objectID; } /** - * Returns updateID. 
- * @return long + * UpdateIDs are monotonically increasing values which are updated + * each time there is an update. */ - public long getUpdateID() { + public final long getUpdateID() { return updateID; } @@ -62,7 +62,7 @@ public long getUpdateID() { * * @param obId - long */ - public void setObjectID(long obId) { + public final void setObjectID(long obId) { if (this.objectID != 0 && obId != OBJECT_ID_RECLAIM_BLOCKS) { throw new UnsupportedOperationException("Attempt to modify object ID " + "which is not zero. Current Object ID is " + this.objectID); @@ -76,7 +76,7 @@ public void setObjectID(long obId) { * @param updateId long * @param isRatisEnabled boolean */ - public void setUpdateID(long updateId, boolean isRatisEnabled) { + public final void setUpdateID(long updateId, boolean isRatisEnabled) { // Because in non-HA, we have multiple rpc handler threads and // transactionID is generated in OzoneManagerServerSideTranslatorPB. @@ -103,21 +103,65 @@ public void setUpdateID(long updateId, boolean isRatisEnabled) { // Main reason, in non-HA transaction Index after restart starts from 0. // And also because of this same reason we don't do replay checks in non-HA. - if (isRatisEnabled && updateId < this.updateID) { + if (isRatisEnabled && updateId < this.getUpdateID()) { throw new IllegalArgumentException(String.format( "Trying to set updateID to %d which is not greater than the " + - "current value of %d for %s", updateId, this.updateID, + "current value of %d for %s", updateId, this.getUpdateID(), getObjectInfo())); } - this.updateID = updateId; - } - - public boolean isUpdateIDset() { - return this.updateID > 0; + this.setUpdateID(updateId); } + /** Hook method, customized in subclasses. */ public String getObjectInfo() { return this.toString(); } + + public final void setUpdateID(long updateID) { + this.updateID = updateID; + } + + /** Builder for {@link WithObjectID}. */ + public static class Builder extends WithMetadata.Builder { + private long objectID; + private long updateID; + + protected Builder() { + super(); + } + + protected Builder(WithObjectID obj) { + super(obj); + objectID = obj.getObjectID(); + updateID = obj.getUpdateID(); + } + + /** + * Sets the Object ID for this Object. + * Object ID are unique and immutable identifier for each object in the + * System. + */ + public Builder setObjectID(long obId) { + this.objectID = obId; + return this; + } + + /** + * Sets the update ID for this Object. Update IDs are monotonically + * increasing values which are updated each time there is an update. + */ + public Builder setUpdateID(long id) { + this.updateID = id; + return this; + } + + public long getObjectID() { + return objectID; + } + + public long getUpdateID() { + return updateID; + } + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java index 79a135af1726..3e228e790405 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java @@ -22,6 +22,16 @@ * Object ID with additional parent ID field. */ public class WithParentObjectId extends WithObjectID { + private long parentObjectID; + + public WithParentObjectId() { + } + + public WithParentObjectId(Builder builder) { + super(builder); + parentObjectID = builder.getParentObjectID(); + } + /** * Object ID with additional parent ID field. 
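With metadata, object ID, update ID and parent ID pulled into the WithMetadata → WithObjectID → WithParentObjectId builder chain, concrete builders such as OmVolumeArgs.Builder or OmKeyInfo.Builder inherit those setters instead of re-declaring the fields. A sketch of a concrete builder picking them up; the metadata key and IDs are hypothetical:

    OmVolumeArgs volume = new OmVolumeArgs.Builder()
        .setAdminName("ozone")                  // OmVolumeArgs-specific setters
        .setOwnerName("alice")
        .setVolume("vol1")
        .addMetadata("created-by", "example")   // inherited from WithMetadata.Builder
        .setObjectID(1024L)                     // inherited from WithObjectID.Builder
        .setUpdateID(1L)
        .build();

    // Copy-style builders such as new OmKeyInfo.Builder(existingKeyInfo) seed the
    // inherited fields (metadata, objectID, updateID, parentObjectID) from the source
    // object before any individual setters are applied.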
* @@ -45,11 +55,34 @@ public class WithParentObjectId extends WithObjectID { * key1 | 1026 | 1025 | * ------------------------------------------| */ - @SuppressWarnings("visibilitymodifier") - protected long parentObjectID; - - public long getParentObjectID() { + public final long getParentObjectID() { return parentObjectID; } + public final void setParentObjectID(long parentObjectID) { + this.parentObjectID = parentObjectID; + } + + /** Builder for {@link WithParentObjectId}. */ + public static class Builder extends WithObjectID.Builder { + private long parentObjectID; + + protected Builder() { + super(); + } + + protected Builder(WithParentObjectId obj) { + super(obj); + parentObjectID = obj.getParentObjectID(); + } + + public Builder setParentObjectID(long parentObjectId) { + this.parentObjectID = parentObjectId; + return this; + } + + protected long getParentObjectID() { + return parentObjectID; + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithTags.java similarity index 76% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithTags.java index e41c645b581a..b7f9713ee333 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithTags.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -9,13 +9,22 @@ * http://www.apache.org/licenses/LICENSE-2.0 *
    * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ +package org.apache.hadoop.ozone.om.helpers; + +import java.util.Map; + /** - * package which contains metrics classes. + * Interface to handle S3 object / bucket tags. */ -package org.apache.hadoop.ozone.om.ratis.metrics; +public interface WithTags { + + Map getTags(); + + void setTags(Map tags); +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index f41f89b181dd..b1f572358362 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.DBUpdates; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -706,6 +707,21 @@ default String createSnapshot(String volumeName, "this to be implemented"); } + /** + * Rename snapshot. + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + default void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented"); + } + /** * Delete snapshot. * @param volumeName vol to be used @@ -1112,10 +1128,10 @@ EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeResp, * @param bucketName - The bucket name. * @param keyName - The key user want to recover. * @param force - force recover the file. - * @return OmKeyInfo KeyInfo of file under recovery + * @return LeaseKeyInfo KeyInfo of file under recovery * @throws IOException if an error occurs */ - OmKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) throws IOException; + LeaseKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) throws IOException; /** * Update modification time and access time of a file. 
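Illustrative sketch, not part of the patch: one way a caller could exercise the new renameSnapshot() default method added to OzoneManagerProtocol above. The class name, volume, bucket and snapshot names are made up for illustration, and the imports of java.io.IOException and org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol are assumed.

    // Hypothetical usage sketch for the new snapshot rename API.
    class SnapshotRenameExample {
      void archiveSnapshot(OzoneManagerProtocol om) throws IOException {
        // Renames snapshot "snap-old" to "snap-archived" under vol1/bucket1.
        // Implementations that do not override the default method throw
        // UnsupportedOperationException, as declared in the interface above.
        om.renameSnapshot("vol1", "bucket1", "snap-old", "snap-archived");
      }
    }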
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java index a4fac2be50d8..ac95728579e9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java @@ -53,13 +53,13 @@ static OmTransportFactory createFactory(ConfigurationSource conf) Iterator iterator = transportFactoryServiceLoader.iterator(); if (iterator.hasNext()) { OmTransportFactory next = iterator.next(); - LOG.info("Found OM transport implementation {} from service loader.", next.getClass().getName()); + LOG.debug("Found OM transport implementation {} from service loader.", next.getClass().getName()); return next; } // Otherwise, load the transport implementation specified by configuration. String transportClassName = conf.get(OZONE_OM_TRANSPORT_CLASS, OZONE_OM_TRANSPORT_CLASS_DEFAULT); - LOG.info("Loading OM transport implementation {} as specified by configuration.", transportClassName); + LOG.debug("Loading OM transport implementation {} as specified by configuration.", transportClassName); return OmTransportFactory.class.getClassLoader() .loadClass(transportClassName) .asSubclass(OmTransportFactory.class) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index bd40dfcf0240..b061091f4cb3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -25,6 +25,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import com.google.protobuf.Proto2Utils; import jakarta.annotation.Nonnull; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.SafeModeAction; @@ -47,6 +48,7 @@ import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -86,6 +88,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest; @@ -107,6 +111,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeRequest; @@ -122,7 +128,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest; @@ -130,16 +135,16 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListOpenFilesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListOpenFilesResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; @@ -164,6 +169,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProtoLight; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequest; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequestArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrintCompactionLogDagRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse; @@ -175,12 +185,14 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameSnapshotRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RevokeS3SecretRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SafeMode; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; @@ -188,12 +200,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetS3SecretRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetTimesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignAdminRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignUserAccessIdRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignUserAccessIdResponse; @@ -205,8 
+220,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantRevokeUserAccessIdRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCResponse; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.security.acl.OzoneObj; @@ -229,17 +242,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_CALLER_CONTEXT_PREFIX; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_IN_SAFE_MODE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareRequest; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareResponse; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequest; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequestArgs; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.ACCESS_DENIED; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.DIRECTORY_ALREADY_EXISTS; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; @@ -256,8 +260,11 @@ public final class OzoneManagerProtocolClientSideTranslatorPB private OmTransport transport; private ThreadLocal threadLocalS3Auth = new ThreadLocal<>(); - private boolean s3AuthCheck; + + public static final int BLOCK_ALLOCATION_RETRY_COUNT = 5; + public static final int BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS = 3000; + public OzoneManagerProtocolClientSideTranslatorPB(OmTransport omTransport, String clientId) { this.clientID = clientId; @@ -683,7 +690,8 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { KeyArgs.Builder keyArgs = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()); + .setKeyName(args.getKeyName()) + .setOwnerName(args.getOwner()); if (args.getAcls() != null) { keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a -> @@ -710,6 +718,10 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { keyArgs.addAllMetadata(KeyValueUtil.toProtobuf(args.getMetadata())); } + if (args.getTags() != null && args.getTags().size() > 0) { + keyArgs.addAllTags(KeyValueUtil.toProtobuf(args.getTags())); + } + if (args.getMultipartUploadID() != null) { 
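+        // Editorial note, not part of the original patch: a client-supplied
+        // multipart upload ID, when present, is forwarded in KeyArgs so the OM
+        // associates this open key with its multipart upload.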
keyArgs.setMultipartUploadID(args.getMultipartUploadID()); } @@ -728,8 +740,7 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { .setCreateKeyRequest(req) .build(); - CreateKeyResponse keyResponse = - handleError(submitRequest(omRequest)).getCreateKeyResponse(); + CreateKeyResponse keyResponse = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getCreateKeyResponse(); return new OpenKeySession(keyResponse.getID(), OmKeyInfo.getFromProtobuf(keyResponse.getKeyInfo()), keyResponse.getOpenVersion()); @@ -774,8 +785,7 @@ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientId, .setAllocateBlockRequest(req) .build(); - AllocateBlockResponse resp = handleError(submitRequest(omRequest)) - .getAllocateBlockResponse(); + AllocateBlockResponse resp = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getAllocateBlockResponse(); return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation()); } @@ -1037,7 +1047,7 @@ public ListKeysLightResult listKeysLight(String volumeName, reqBuilder.setBucketName(bucketName); reqBuilder.setCount(maxKeys); - if (StringUtils.isNotEmpty(startKey)) { + if (startKey != null) { reqBuilder.setStartKey(startKey); } @@ -1228,6 +1238,26 @@ public String createSnapshot(String volumeName, return snapshotInfo.getName(); } + /** + * {@inheritDoc} + */ + @Override + public void renameSnapshot(String volumeName, String bucketName, + String snapshotOldName, String snapshotNewName) throws IOException { + RenameSnapshotRequest.Builder requestBuilder = + RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotOldName(snapshotOldName) + .setSnapshotNewName(snapshotNewName); + + final OMRequest omRequest = createOMRequest(Type.RenameSnapshot) + .setRenameSnapshotRequest(requestBuilder) + .build(); + final OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + } + /** * {@inheritDoc} */ @@ -1579,8 +1609,11 @@ public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws .setVolumeName(omKeyArgs.getVolumeName()) .setBucketName(omKeyArgs.getBucketName()) .setKeyName(omKeyArgs.getKeyName()) + .addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata())) + .setOwnerName(omKeyArgs.getOwner()) .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) + .addAllTags(KeyValueUtil.toProtobuf(omKeyArgs.getTags())); setReplicationConfig(omKeyArgs.getReplicationConfig(), keyArgs); @@ -1636,7 +1669,8 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( .getCommitMultiPartUploadResponse(); OmMultipartCommitUploadPartInfo info = new - OmMultipartCommitUploadPartInfo(response.getPartName()); + OmMultipartCommitUploadPartInfo(response.getPartName(), + response.getETag()); return info; } @@ -1653,6 +1687,7 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setKeyName(omKeyArgs.getKeyName()) .addAllAcls(omKeyArgs.getAcls().stream().map(a -> OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) + .setOwnerName(omKeyArgs.getOwner()) .setMultipartUploadID(omKeyArgs.getMultipartUploadID()); multipartUploadCompleteRequest.setKeyArgs(keyArgs.build()); @@ -2059,6 +2094,7 @@ public void createDirectory(OmKeyArgs args) throws IOException { .setKeyName(args.getKeyName()) .addAllAcls(args.getAcls().stream().map(a -> OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) + .setOwnerName(args.getOwner()) .build(); CreateDirectoryRequest request = 
CreateDirectoryRequest.newBuilder() .setKeyArgs(keyArgs) @@ -2224,7 +2260,8 @@ public OpenKeySession createFile(OmKeyArgs args, .setKeyName(args.getKeyName()) .setDataSize(args.getDataSize()) .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) + .setOwnerName(args.getOwner()); if (args.getReplicationConfig() != null) { if (args.getReplicationConfig() instanceof ECReplicationConfig) { keyArgsBuilder.setEcReplicationConfig( @@ -2243,12 +2280,38 @@ public OpenKeySession createFile(OmKeyArgs args, OMRequest omRequest = createOMRequest(Type.CreateFile) .setCreateFileRequest(createFileRequest) .build(); - CreateFileResponse resp = - handleError(submitRequest(omRequest)).getCreateFileResponse(); + CreateFileResponse resp = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getCreateFileResponse(); + return new OpenKeySession(resp.getID(), OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion()); } + + @Nonnull + private OMResponse handleSubmitRequestAndSCMSafeModeRetry(OMRequest omRequest) throws IOException { + int retryCount = BLOCK_ALLOCATION_RETRY_COUNT; + while (true) { + try { + return handleError(submitRequest(omRequest)); + } catch (OMException e) { + if (e.getResult().equals(SCM_IN_SAFE_MODE) && retryCount > 0) { + System.err.println("SCM is in safe mode. Will retry in " + + BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS + "ms"); + retryCount--; + try { + Thread.sleep(BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS); + continue; + } catch (InterruptedException ex) { + throw new OMException(ex.getMessage(), ResultCodes.SCM_IN_SAFE_MODE); + } + } else if (e.getResult().equals(SCM_IN_SAFE_MODE) && retryCount == 0) { + throw new OMException(e.getMessage(), ResultCodes.SCM_IN_SAFE_MODE); + } + throw e; + } + } + } + @Override public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) @@ -2260,16 +2323,9 @@ public List listStatus(OmKeyArgs args, boolean recursive, .setSortDatanodes(args.getSortDatanodes()) .setLatestVersionLocation(args.getLatestVersionLocation()) .build(); - ListStatusRequest.Builder listStatusRequestBuilder = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries); - if (allowPartialPrefixes) { - listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); - } + ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); OMRequest omRequest = createOMRequest(Type.ListStatus) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2296,16 +2352,9 @@ public List listStatusLight(OmKeyArgs args, .setSortDatanodes(false) .setLatestVersionLocation(true) .build(); - ListStatusRequest.Builder listStatusRequestBuilder = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries); - if (allowPartialPrefixes) { - listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); - } + ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); OMRequest omRequest = createOMRequest(Type.ListStatusLight) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2322,6 +2371,26 @@ public List listStatusLight(OmKeyArgs args, return statusList; } + private ListStatusRequest.Builder 
createListStatusRequestBuilder(KeyArgs keyArgs, boolean recursive, String startKey, + long numEntries, boolean allowPartialPrefixes) { + ListStatusRequest.Builder listStatusRequestBuilder = + ListStatusRequest.newBuilder() + .setKeyArgs(keyArgs) + .setRecursive(recursive) + .setNumEntries(numEntries); + + if (startKey != null) { + listStatusRequestBuilder.setStartKey(startKey); + } else { + listStatusRequestBuilder.setStartKey(""); + } + + if (allowPartialPrefixes) { + listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); + } + return listStatusRequestBuilder; + } + @Override public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries) throws IOException { @@ -2458,12 +2527,12 @@ public CancelPrepareResponse cancelOzoneManagerPrepare() throws IOException { } @Override - public EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeResp, + public EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeRespBytes, boolean writeToRatis) throws IOException { EchoRPCRequest echoRPCRequest = EchoRPCRequest.newBuilder() - .setPayloadReq(ByteString.copyFrom(payloadReq)) - .setPayloadSizeResp(payloadSizeResp) + .setPayloadReq(Proto2Utils.unsafeByteString(payloadReq)) + .setPayloadSizeResp(payloadSizeRespBytes) .setReadOnly(!writeToRatis) .build(); @@ -2476,7 +2545,7 @@ public EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeResp, } @Override - public OmKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) + public LeaseKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) throws IOException { RecoverLeaseRequest recoverLeaseRequest = RecoverLeaseRequest.newBuilder() @@ -2492,7 +2561,8 @@ public OmKeyInfo recoverLease(String volumeName, String bucketName, String keyNa RecoverLeaseResponse recoverLeaseResponse = handleError(submitRequest(omRequest)).getRecoverLeaseResponse(); - return OmKeyInfo.getFromProtobuf(recoverLeaseResponse.getKeyInfo()); + return new LeaseKeyInfo(OmKeyInfo.getFromProtobuf(recoverLeaseResponse.getKeyInfo()), + recoverLeaseResponse.getIsKeyInfo()); } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java index 060372f118d2..92b59f782b1c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -63,6 +63,13 @@ enum ACLType { ALL, NONE; private static int length = ACLType.values().length; + static { + if (length > 16) { + // must update getAclBytes(..) 
and other code + throw new AssertionError("BUG: Length = " + length + + " > 16, check the commit of this change and update the code."); + } + } private static ACLType[] vals = ACLType.values(); public static int getNoOfAcls() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index 09c8743137d4..ca32c96855dd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -186,6 +186,16 @@ public static Builder fromKeyArgs(OmKeyArgs args) { .setResType(ResourceType.KEY); } + public static Builder fromOzoneObj(OzoneObj obj) { + return new Builder() + .setVolumeName(obj.getVolumeName()) + .setBucketName(obj.getBucketName()) + .setKeyName(obj.getKeyName()) + .setResType(obj.getResourceType()) + .setStoreType(obj.getStoreType()) + .setOzonePrefixPath(obj.getOzonePrefixPathViewer()); + } + public Builder setResType(OzoneObj.ResourceType res) { this.resType = res; return this; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java index 3d14e266daa4..29bf4deb2a08 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java @@ -101,16 +101,12 @@ public String toString() { .append(" and snapshot: ") .append(getLaterSnapshotName()) .append(LINE_SEPARATOR); - if (!getDiffList().isEmpty()) { - for (DiffReportEntry entry : getDiffList()) { - str.append(entry.toString()).append(LINE_SEPARATOR); - } - if (StringUtils.isNotEmpty(token)) { - str.append("Next token: ") - .append(token); - } - } else { - str.append("No diff or no more diff for the request parameters."); + for (DiffReportEntry entry : getDiffList()) { + str.append(entry.toString()).append(LINE_SEPARATOR); + } + if (StringUtils.isNotEmpty(token)) { + str.append("Next token: ") + .append(token); } return str.toString(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/PayloadUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/PayloadUtils.java new file mode 100644 index 000000000000..90a94d8a3103 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/PayloadUtils.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.util; + +import com.google.protobuf.Proto2Utils; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.ratis.util.Preconditions; + +import java.util.Random; + +/** + * Utility class for payload operations. + */ +public final class PayloadUtils { + private static final int MAX_SIZE = 2097151 * 1024; + private static final byte[] SEED = new byte[1024]; + + static { + new Random().nextBytes(SEED); + } + + private PayloadUtils() { + } + + /** @return a new byte[] containing */ + public static byte[] generatePayload(int payloadSizeBytes) { + byte[] result = new byte[Math.min(payloadSizeBytes, MAX_SIZE)]; + + // duplicate SEED to create the required payload. + int curIdx = 0; + while (curIdx < result.length) { + int remaining = result.length - curIdx; + int copySize = Math.min(SEED.length, remaining); + System.arraycopy(SEED, 0, result, curIdx, copySize); + curIdx += copySize; + } + + Preconditions.assertTrue(curIdx == result.length); + + return result; + } + + public static com.google.protobuf.ByteString generatePayloadProto2(int payloadSizeBytes) { + return Proto2Utils.unsafeByteString(generatePayload(payloadSizeBytes)); + } + + public static ByteString generatePayloadProto3(int payloadSizeBytes) { + return UnsafeByteOperations.unsafeWrap(generatePayload(payloadSizeBytes)); + } +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java index 08ae1fbc65b8..0b93404aff2f 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java @@ -130,132 +130,132 @@ void testAclParse() { void testAclValues() { OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(READ_ACL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertFalse(acl.isSet(ALL)); + assertFalse(acl.isSet(READ_ACL)); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:a"); assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(WRITE.ordinal())); + assertTrue(acl.isSet(ALL)); + assertFalse(acl.isSet(WRITE)); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:r"); assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:w"); assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(WRITE)); + 
assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("group:hobbit:a"); assertEquals(acl.getName(), "hobbit"); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(READ.ordinal())); + assertTrue(acl.isSet(ALL)); + assertFalse(acl.isSet(READ)); assertEquals(ACLIdentityType.GROUP, acl.getType()); acl = OzoneAcl.parseAcl("world::a"); assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(WRITE.ordinal())); + assertTrue(acl.isSet(ALL)); + assertFalse(acl.isSet(WRITE)); assertEquals(ACLIdentityType.WORLD, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy"); assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy"); assertEquals(acl.getName(), "hadoop"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.GROUP, acl.getType()); acl = OzoneAcl.parseAcl("world::rwdlncxy"); assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.WORLD, acl.getType()); // Acls with scope info. 
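+    // Editorial note, not part of the original patch: these assertions use the
+    // new OzoneAcl.isSet(ACLType) accessor, replacing the direct BitSet lookups
+    // (acl.getAclBitSet().get(...ordinal())) removed throughout this test.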
acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[DEFAULT]"); assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(acl.getAclScope(), OzoneAcl.AclScope.DEFAULT); acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(acl.getAclScope(), OzoneAcl.AclScope.ACCESS); acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy[ACCESS]"); assertEquals(acl.getName(), "hadoop"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.GROUP, acl.getType()); assertEquals(acl.getAclScope(), OzoneAcl.AclScope.ACCESS); acl = OzoneAcl.parseAcl("world::rwdlncxy[DEFAULT]"); assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + 
assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.WORLD, acl.getType()); assertEquals(OzoneAcl.AclScope.DEFAULT, acl.getAclScope()); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java index c38c09360f01..638dd3414e86 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java @@ -81,8 +81,7 @@ public void testClone() { .setAcls(Collections.singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "defaultUser", - IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL ))) .build(); @@ -97,8 +96,7 @@ public void testClone() { omBucketInfo.setAcls(Collections.singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "newUser", - IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL ))); assertNotEquals( omBucketInfo.getAcls().get(0), @@ -115,8 +113,7 @@ public void testClone() { omBucketInfo.removeAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "newUser", - IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL )); assertEquals(0, omBucketInfo.getAcls().size()); assertEquals(1, cloneBucketInfo.getAcls().size()); @@ -131,8 +128,8 @@ public void getProtobufMessageEC() { .setStorageType(StorageType.ARCHIVE).setAcls(Collections .singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - "defaultUser", IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS))).build(); + "defaultUser", OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL + ))).build(); OzoneManagerProtocolProtos.BucketInfo protobuf = omBucketInfo.getProtobuf(); // No EC Config assertFalse(protobuf.hasDefaultReplicationConfig()); @@ -150,8 +147,8 @@ public void getProtobufMessageEC() { .setStorageType(StorageType.ARCHIVE) .setAcls(Collections.singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - "defaultUser", IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS))) + "defaultUser", OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL + ))) .setDefaultReplicationConfig( new DefaultReplicationConfig( new ECReplicationConfig(3, 2))).build(); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java index 6396f0318dcc..4aead0cd8bcb 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java @@ -182,7 +182,7 @@ private void createdAndTest(boolean isMPU) { key.setAcls(Arrays.asList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.WRITE, ACCESS))); + ACCESS, IAccessAuthorizer.ACLType.WRITE))); // Change acls and check. 
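+    // Editorial note, not part of the original patch: the OzoneAcl constructor
+    // calls in these tests now pass the ACL scope before the ACLType values,
+    // e.g. new OzoneAcl(USER, "user1", ACCESS, ACLType.WRITE).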
assertNotEquals(key, cloneKey); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfo.java index 216f7316b6c8..306d6149038f 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfo.java @@ -42,7 +42,7 @@ public class TestOmMultipartKeyInfo { @Test - public void copyObject() { + public void testCopyObject() { for (ReplicationConfig param : replicationConfigs().collect(toList())) { testCopyObject(param); } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmVolumeArgs.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmVolumeArgs.java index f5c854d9e709..41757d957a86 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmVolumeArgs.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmVolumeArgs.java @@ -46,7 +46,7 @@ public void testClone() throws Exception { .addMetadata("key1", "value1").addMetadata("key2", "value2") .addOzoneAcls( new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.READ, ACCESS)).build(); + ACCESS, IAccessAuthorizer.ACLType.READ)).build(); OmVolumeArgs cloneVolumeArgs = omVolumeArgs.copyObject(); @@ -55,7 +55,7 @@ public void testClone() throws Exception { // add user acl to write. omVolumeArgs.addAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.WRITE, ACCESS)); + ACCESS, IAccessAuthorizer.ACLType.WRITE)); // Now check clone acl assertNotEquals(cloneVolumeArgs.getAcls().get(0), @@ -64,7 +64,7 @@ public void testClone() throws Exception { // Set user acl to Write_ACL. omVolumeArgs.setAcls(Collections.singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.WRITE_ACL, ACCESS))); + ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL))); assertNotEquals(cloneVolumeArgs.getAcls().get(0), omVolumeArgs.getAcls().get(0)); @@ -78,7 +78,7 @@ public void testClone() throws Exception { omVolumeArgs.removeAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.WRITE_ACL, ACCESS)); + ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL)); // Removing acl, in original omVolumeArgs it should have no acls. 
assertEquals(0, omVolumeArgs.getAcls().size()); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java index 7f157860e695..35a8a95d8d02 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.List; import static org.apache.hadoop.hdds.conf.OzoneConfiguration.newInstanceOf; @@ -49,13 +48,13 @@ public class TestOzoneAclUtil { getDefaultAcls(); private static final OzoneAcl USER1 = new OzoneAcl(USER, "user1", - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); private static final OzoneAcl USER2 = new OzoneAcl(USER, "user2", - ACLType.WRITE, ACCESS); + ACCESS, ACLType.WRITE); private static final OzoneAcl GROUP1 = new OzoneAcl(GROUP, "group1", - ACLType.ALL, ACCESS); + ACCESS, ACLType.ALL); @Test public void testAddAcl() throws IOException { @@ -65,7 +64,7 @@ public void testAddAcl() throws IOException { // Add new permission to existing acl entry. OzoneAcl oldAcl = currentAcls.get(0); OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); addAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size()); // Add same permission again and verify result @@ -97,7 +96,7 @@ public void testRemoveAcl() { // Add new permission to existing acl entru. OzoneAcl oldAcl = currentAcls.get(0); OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); // Remove non existing acl entry removeAndVerifyAcl(currentAcls, USER1, false, DEFAULT_ACLS.size()); @@ -143,9 +142,12 @@ private boolean verifyAclRemoved(List acls, OzoneAcl removedAcl) { if (acl.getName().equals(removedAcl.getName()) && acl.getType().equals(removedAcl.getType()) && acl.getAclScope().equals(removedAcl.getAclScope())) { - BitSet temp = (BitSet) acl.getAclBitSet().clone(); - temp.and(removedAcl.getAclBitSet()); - return !temp.equals(removedAcl.getAclBitSet()); + for (ACLType t : removedAcl.getAclList()) { + if (acl.isSet(t)) { + return false; + } + } + return true; } } return true; @@ -156,9 +158,12 @@ private boolean verifyAclAdded(List acls, OzoneAcl newAcl) { if (acl.getName().equals(newAcl.getName()) && acl.getType().equals(newAcl.getType()) && acl.getAclScope().equals(newAcl.getAclScope())) { - BitSet temp = (BitSet) acl.getAclBitSet().clone(); - temp.and(newAcl.getAclBitSet()); - return temp.equals(newAcl.getAclBitSet()); + for (ACLType t : newAcl.getAclList()) { + if (!acl.isSet(t)) { + return false; + } + } + return true; } } return false; @@ -185,11 +190,11 @@ private static List getDefaultAcls() { IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights(); OzoneAclUtil.addAcl(ozoneAcls, new OzoneAcl(USER, - ugi.getUserName(), userRights, ACCESS)); + ugi.getUserName(), ACCESS, userRights)); //Group ACLs of the User List userGroups = Arrays.asList(ugi.getGroupNames()); userGroups.stream().forEach((group) -> OzoneAclUtil.addAcl(ozoneAcls, - new OzoneAcl(GROUP, group, groupRights, ACCESS))); + new OzoneAcl(GROUP, group, ACCESS, groupRights))); return ozoneAcls; } @@ -226,7 +231,6 @@ public void testAddDefaultAcl() { assertEquals(2, ozoneAcls.size()); 
assertNotEquals(ozoneAcls.get(0).getAclScope(), ozoneAcls.get(1).getAclScope()); - assertEquals(ozoneAcls.get(0).getAclBitSet(), - ozoneAcls.get(1).getAclBitSet()); + assertEquals(ozoneAcls.get(0).getAclByteString(), ozoneAcls.get(1).getAclByteString()); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/PayloadUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestPayloadUtils.java similarity index 56% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/PayloadUtils.java rename to hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestPayloadUtils.java index d198bfcbfc5c..221a0ce6a98b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/PayloadUtils.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestPayloadUtils.java @@ -14,31 +14,21 @@ * License for the specific language governing permissions and limitations under * the License. */ +package org.apache.hadoop.ozone.util; -package org.apache.hadoop.ozone.common; - -import org.apache.commons.lang3.RandomUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; /** - * Utility class for payload operations. + * Tests {@link PayloadUtils}. */ -public final class PayloadUtils { - - private static final int RPC_PAYLOAD_MULTIPLICATION_FACTOR = 1024; - private static final int MAX_SIZE_KB = 2097151; - - private PayloadUtils() { - } - - public static byte[] generatePayloadBytes(int payloadSize) { - - byte[] payloadBytes = new byte[0]; - int payloadRespSize = - Math.min(payloadSize * RPC_PAYLOAD_MULTIPLICATION_FACTOR, MAX_SIZE_KB); - if (payloadRespSize > 0) { - payloadBytes = RandomUtils.nextBytes(payloadRespSize); - } +public class TestPayloadUtils { - return payloadBytes; + @ParameterizedTest + @ValueSource(ints = {0, 1, 1023, 1024, 1025, 2048}) + public void testGeneratePayload(int payload) { + byte[] generated = PayloadUtils.generatePayload(payload); + Assertions.assertEquals(payload, generated.length); } } diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index b28db73aed6e..d40a995ab920 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -46,6 +46,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.j2objc j2objc-annotations + + com.google.code.findbugs + jsr305 + @@ -62,6 +66,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + com.google.code.findbugs + jsr305 + 3.0.2 + provided + com.google.guava guava @@ -109,6 +119,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.protobuf protobuf-java + + com.google.code.findbugs + jsr305 + diff --git a/hadoop-ozone/dev-support/checks/_lib.sh b/hadoop-ozone/dev-support/checks/_lib.sh index b81acf989930..134c8f53c6e8 100644 --- a/hadoop-ozone/dev-support/checks/_lib.sh +++ b/hadoop-ozone/dev-support/checks/_lib.sh @@ -149,3 +149,18 @@ install_spotbugs() { _install_spotbugs() { curl -LSs https://repo.maven.apache.org/maven2/com/github/spotbugs/spotbugs/3.1.12/spotbugs-3.1.12.tgz | tar -xz -f - } + +download_hadoop_aws() { + local dir="$1" + + if [[ -z ${dir} ]]; then + echo "Required argument: target directory for Hadoop AWS sources" >&2 + return 1 + fi + + if [[ ! -e "${dir}" ]] || [[ ! 
-d "${dir}"/src/test/resources ]]; then + mkdir -p "${dir}" + [[ -f "${dir}.tar.gz" ]] || curl -LSs -o "${dir}.tar.gz" https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz + tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*-src/hadoop-tools/hadoop-aws' || return 1 + fi +} diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 5139dddcd8c1..36205c69bb64 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -35,7 +35,7 @@ find "." -not -path '*/iteration*' -name 'TEST*.xml' -print0 \ > "${tempfile}" if [[ "${CHECK:-unit}" == "integration" ]]; then - find "." -not -path '*/iteration*' -name '*-output.txt' -print0 \ + find hadoop-ozone/integration-test -not -path '*/iteration*' -name '*-output.txt' -print0 \ | xargs -n1 -0 "grep" -l -E "not closed properly|was not shutdown properly" \ | awk -F/ '{sub("-output.txt",""); print $NF}' \ >> "${tempfile}" @@ -81,8 +81,8 @@ for failed_test in $(< ${REPORT_DIR}/summary.txt); do \( -name "${failed_test}.txt" -or -name "${failed_test}-output.txt" -or -name "TEST-${failed_test}.xml" \)); do dir=$(dirname "${file}") dest_dir=$(_realpath --relative-to="${PWD}" "${dir}/../..") || continue - mkdir -p "${REPORT_DIR}/${dest_dir}" - mv "${file}" "${REPORT_DIR}/${dest_dir}"/ + mkdir -pv "${REPORT_DIR}/${dest_dir}" + mv -v "${file}" "${REPORT_DIR}/${dest_dir}"/ done done diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh index 0489fa24384a..5be3f7b5879a 100755 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ b/hadoop-ozone/dev-support/checks/acceptance.sh @@ -19,15 +19,20 @@ set -u -o pipefail DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 -source "${DIR}/_lib.sh" +OZONE_ROOT=$(pwd -P) + +: ${HADOOP_AWS_DIR:=""} +: ${OZONE_ACCEPTANCE_SUITE:=""} +: ${OZONE_TEST_SELECTOR:=""} +: ${OZONE_ACCEPTANCE_TEST_TYPE:="robot"} +: ${OZONE_WITH_COVERAGE:="false"} -install_virtualenv -install_robot +source "${DIR}/_lib.sh" -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/acceptance"} +REPORT_DIR=${OUTPUT_DIR:-"${OZONE_ROOT}/target/acceptance"} OZONE_VERSION=$(mvn help:evaluate -Dexpression=ozone.version -q -DforceStdout) -DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION" +DIST_DIR="${OZONE_ROOT}/hadoop-ozone/dist/target/ozone-$OZONE_VERSION" if [ ! -d "$DIST_DIR" ]; then echo "Distribution dir is missing. Doing a full build" @@ -36,15 +41,42 @@ fi mkdir -p "$REPORT_DIR" -export OZONE_ACCEPTANCE_SUITE +if [[ "${OZONE_ACCEPTANCE_SUITE}" == "s3a" ]]; then + OZONE_ACCEPTANCE_TEST_TYPE="maven" + + if [[ -z "${HADOOP_AWS_DIR}" ]]; then + HADOOP_VERSION=$(mvn help:evaluate -Dexpression=hadoop.version -q -DforceStdout) + export HADOOP_AWS_DIR=${OZONE_ROOT}/target/hadoop-src + fi + + download_hadoop_aws "${HADOOP_AWS_DIR}" +fi + +if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "robot" ]]; then + install_virtualenv + install_robot +fi + +export OZONE_ACCEPTANCE_SUITE OZONE_ACCEPTANCE_TEST_TYPE cd "$DIST_DIR/compose" || exit 1 ./test-all.sh 2>&1 | tee "${REPORT_DIR}/output.log" RES=$? 
-cp -rv result/* "$REPORT_DIR/" -cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" -find "$REPORT_DIR" -type f -empty -print0 | xargs -0 rm -v -grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_DIR}/summary.txt" +if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "maven" ]]; then + pushd result + source "${DIR}/_mvn_unit_report.sh" + find . -name junit -print0 | xargs -r -0 rm -frv + cp -rv * "${REPORT_DIR}"/ + popd +else + cp -rv result/* "$REPORT_DIR/" + if [[ -f "${REPORT_DIR}/log.html" ]]; then + cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" + fi + grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_DIR}/summary.txt" +fi + +find "$REPORT_DIR" -type f -empty -not -name summary.txt -print0 | xargs -0 rm -v exit $RES diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index cb8b6f8f9151..18ae39059755 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -42,7 +42,7 @@ cat "${REPORT_DIR}/output.log" find "." -name checkstyle-errors.xml -print0 \ | xargs -0 sed '$!N; //d' \ + -e '//d' \ -e '//dev/null 2>&1 && pwd )" CHECK=integration -source "${DIR}/junit.sh" -pl :ozone-integration-test,:mini-chaos-tests "$@" +source "${DIR}/junit.sh" "$@" diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index 768a1f32a38b..422de1381034 100755 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -50,10 +50,8 @@ if [[ -f hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh ]]; then ozone_java_setup fi -if [[ "${CHECK}" == "integration" ]] || [[ ${ITERATIONS} -gt 1 ]]; then - if [[ ${OZONE_REPO_CACHED} == "false" ]]; then - mvn ${MAVEN_OPTIONS} -DskipTests clean install - fi +if [[ ${ITERATIONS} -gt 1 ]] && [[ ${OZONE_REPO_CACHED} == "false" ]]; then + mvn ${MAVEN_OPTIONS} -DskipTests clean install fi REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/${CHECK}"} @@ -79,7 +77,7 @@ for i in $(seq 1 ${ITERATIONS}); do fi if [[ ${ITERATIONS} -gt 1 ]]; then - if ! grep -q "Tests run: [^0]" "${REPORT_DIR}/output.log"; then + if ! grep -q "Running .*Test" "${REPORT_DIR}/output.log"; then echo "No tests were run" >> "${REPORT_DIR}/summary.txt" irc=1 FAIL_FAST=true diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh index 1eeca5c0f3d9..bdeb1386815d 100755 --- a/hadoop-ozone/dev-support/checks/native.sh +++ b/hadoop-ozone/dev-support/checks/native.sh @@ -14,25 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
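With integration.sh above no longer hard-coding -pl :ozone-integration-test,:mini-chaos-tests, module and test selection is left to the caller and forwarded to junit.sh (and from there to Maven) unchanged. A sketch of a targeted run under that assumption; the module and test class below are illustrative, not defaults baked into the script:

    # Extra arguments pass straight through to junit.sh / Maven.
    hadoop-ozone/dev-support/checks/integration.sh -pl :ozone-integration-test -Dtest=TestHSync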
-#checks:unit +#checks:native DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=native -zlib_version=$(mvn -N help:evaluate -Dexpression=zlib.version -q -DforceStdout) -if [[ -z "${zlib_version}" ]]; then - echo "ERROR zlib.version not defined in pom.xml" - exit 1 -fi - -bzip2_version=$(mvn -N help:evaluate -Dexpression=bzip2.version -q -DforceStdout) -if [[ -z "${bzip2_version}" ]]; then - echo "ERROR bzip2.version not defined in pom.xml" - exit 1 -fi - -source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \ - -Dbzip2.url="https://github.com/libarchive/bzip2/archive/refs/tags/bzip2-${bzip2_version}.tar.gz" \ - -Dzlib.url="https://github.com/madler/zlib/releases/download/v${zlib_version}/zlib-${zlib_version}.tar.gz" \ - -DexcludedGroups="unhealthy" \ +source "${DIR}/junit.sh" -Pnative -Drocks_tools_native -DexcludedGroups="unhealthy" \ "$@" diff --git a/hadoop-ozone/dev-support/checks/rat.sh b/hadoop-ozone/dev-support/checks/rat.sh index 32a10349026c..2bdb66ba1198 100755 --- a/hadoop-ozone/dev-support/checks/rat.sh +++ b/hadoop-ozone/dev-support/checks/rat.sh @@ -24,13 +24,7 @@ mkdir -p "$REPORT_DIR" REPORT_FILE="$REPORT_DIR/summary.txt" -dirs="hadoop-hdds hadoop-ozone" - -for d in $dirs; do - pushd "$d" || exit 1 - mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:0.13:check - popd -done +mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:check "$@" grep -r --include=rat.txt "!????" $dirs | tee "$REPORT_FILE" diff --git a/hadoop-ozone/dev-support/checks/unit.sh b/hadoop-ozone/dev-support/checks/unit.sh index d2d50c5ff03f..8e540fa9e141 100755 --- a/hadoop-ozone/dev-support/checks/unit.sh +++ b/hadoop-ozone/dev-support/checks/unit.sh @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
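The rat.sh rewrite above replaces the per-directory loops with a single reactor-wide apache-rat-plugin invocation. A rough manual equivalent from the repository root looks like the following; the explicit directory list in the grep stands in for the dirs variable removed above and is an assumption here:

    # Run the RAT check across the whole reactor, then collect unapproved-license hits.
    mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:check
    grep -r --include=rat.txt '!????' hadoop-hdds hadoop-ozone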
-#checks:unit - DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=unit source "${DIR}/junit.sh" \ diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml index 171494aa5dbe..df9c4c0ab3e6 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml @@ -18,7 +18,7 @@ - - org.junit.jupiter - junit-jupiter-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 26f896663b81..3c12bab4323b 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -261,10 +261,10 @@ protected void initializeConfiguration() throws IOException { TimeUnit.SECONDS); conf.setInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, 4); conf.setInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, 2); conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); ReplicationManagerConfiguration replicationConf = @@ -273,34 +273,21 @@ protected void initializeConfiguration() throws IOException { replicationConf.setEventTimeout(Duration.ofSeconds(20)); replicationConf.setDatanodeTimeoutOffset(0); conf.setFromObject(replicationConf); - conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); + conf.setInt(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, 100); conf.setInt(OMConfigKeys. OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, 100); } - /** - * Sets the number of data volumes per datanode. - * - * @param val number of volumes per datanode. - * - * @return MiniOzoneCluster.Builder - */ - @Override - public Builder setNumDataVolumes(int val) { - numDataVolumes = val; - return this; - } - @Override public MiniOzoneChaosCluster build() throws IOException { DefaultMetricsSystem.setMiniClusterMode(true); DatanodeStoreCache.setMiniClusterMode(); initializeConfiguration(); - if (numOfOMs > 1) { + if (numberOfOzoneManagers() > 1) { initOMRatisConf(); } @@ -313,8 +300,7 @@ public MiniOzoneChaosCluster build() throws IOException { throw new IOException("Unable to build MiniOzoneCluster. 
", ex); } - final List hddsDatanodes = createHddsDatanodes( - scmService.getActiveServices(), null); + final List hddsDatanodes = createHddsDatanodes(); MiniOzoneChaosCluster cluster = new MiniOzoneChaosCluster(conf, omService, scmService, hddsDatanodes, @@ -323,6 +309,7 @@ public MiniOzoneChaosCluster build() throws IOException { if (startDataNodes) { cluster.startHddsDatanodes(); } + prepareForNextBuild(); return cluster; } } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java index 6894aed25ab6..5be5c3ef0c5b 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java @@ -130,7 +130,9 @@ public static void init() throws Exception { .setOMServiceID(omServiceId) .setNumStorageContainerManagers(numStorageContainerManagerss) .setSCMServiceID(scmServiceId) - .setNumDataVolumes(numDataVolumes); + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(numDataVolumes) + .build()); failureClasses.forEach(chaosBuilder::addFailures); cluster = chaosBuilder.build(); diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 7de9bcc297da..432faab48777 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -33,4 +33,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> mini-chaos-tests + + + org.mockito + mockito-inline + test + + + diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index f56b4006d852..582e6c1034a7 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -201,7 +201,7 @@ org.apache.maven.plugins maven-eclipse-plugin - 2.6 + 2.10 org.apache.maven.plugins @@ -242,10 +242,6 @@ - - org.apache.rat - apache-rat-plugin - org.apache.maven.plugins maven-antrun-plugin diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java index e35ac3660d0a..0ba313e18b89 100644 --- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java +++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java @@ -282,8 +282,10 @@ public Response get(@PathParam("path") String path, response = handleListStatus(path, params, user); break; case GETHOMEDIRECTORY: - response = handleGetHomeDir(path, op, user); - break; + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support GETHOMEDIRECTORY"); + //response = handleGetHomeDir(path, op, user); + //break; case INSTRUMENTATION: response = handleInstrumentation(path, op, user); break; @@ -316,8 +318,10 @@ public Response get(@PathParam("path") String path, //response = handleListStatusBatch(path, params, user); //break; case GETTRASHROOT: - response = handleGetTrashRoot(path, user); - break; + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support GETTRASHROOT"); + //response = handleGetTrashRoot(path, user); + //break; case GETALLSTORAGEPOLICY: response = handleGetAllStoragePolicy(path, user); break; diff --git 
a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 913cd639bf7c..7de7b2f936ec 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -119,11 +119,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-jar test - - org.junit.jupiter - junit-jupiter-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java index 51d75c07d2d0..3bb387440736 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,6 +43,7 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB private Path testPath; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java index ff5ed3b0624b..9845caad45d1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,6 +45,7 @@ public abstract class AbstractContractConcatTest extends AbstractFSContractTestB private Path zeroByteFile; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java index 0d6c30e52c0f..8beaff7e8b2c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java @@ -25,6 +25,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.apache.commons.io.FileUtils; @@ -44,6 +45,7 @@ public abstract class AbstractContractCopyFromLocalTest extends private static final Charset ASCII = StandardCharsets.US_ASCII; private File file; + @AfterEach @Override public void teardown() throws Exception { super.teardown(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java index 21290d1e889f..12cfba2312a7 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.contract.ContractTestUtils.createSubdirs; @@ -63,6 +64,7 @@ public abstract class AbstractContractGetFileStatusTest extends private static final int TREE_FILES = 4; private static final int TREE_FILESIZE = 512; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index 86363b55ccff..51474945fb81 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -30,6 +30,8 @@ import com.google.common.base.Charsets; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -83,6 +85,7 @@ public abstract class AbstractContractMultipartUploaderTest extends private UploadHandle activeUpload; private Path activeUploadPath; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -96,6 +99,7 @@ public void setup() throws Exception { uploader1 = fs.createMultipartUploader(testPath).build(); } + @AfterEach @Override public void teardown() throws Exception { MultipartUploader uploader = getUploader(1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java index 166e8e301e40..51ebd4437b9f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java @@ -45,6 +45,7 @@ import static org.assertj.core.api.Assertions.fail; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; /** @@ -62,6 +63,7 @@ protected Configuration createConfiguration() { return conf; } + @AfterEach @Override public void teardown() throws Exception { IOUtils.closeStream(instream); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java index 3ff3f72cc6e6..d164a7144b0a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.assertj.core.api.Assertions; import org.slf4j.Logger; @@ -58,6 +59,7 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class); public static final int OBJECTSTORE_RETRY_TIMEOUT = 30000; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java index 618025dc06f7..af259f600e27 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java @@ -26,6 +26,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +55,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas private Path zeroByteFile; private FSDataInputStream instream; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -74,6 +77,7 @@ protected Configuration createConfiguration() { return conf; } + @AfterEach @Override public void teardown() throws Exception { IOUtils.closeStream(instream); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java index b9a86ae366cd..82efce828142 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java @@ -21,6 +21,7 @@ import java.io.FileNotFoundException; import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,6 +39,7 @@ public abstract class AbstractContractSetTimesTest extends private Path testPath; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java index 07c4f26543a8..3a58d2124d8e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.contract; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -38,6 +39,7 @@ public abstract class AbstractContractUnbufferTest extends AbstractFSContractTes private Path file; private byte[] fileBytes; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index a9fc2710ce31..d92ddef7be23 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -110,6 +110,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER; +import static org.apache.hadoop.fs.ozone.OzoneFileSystemTests.createKeyWithECReplicationConfiguration; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -427,6 +428,19 @@ public void testCreateDoesNotAddParentDirKeys() throws Exception { assertTrue(fs.getFileStatus(parent).isDirectory(), "Parent directory does not appear to be a directory"); } + @Test + public void testCreateKeyWithECReplicationConfig() throws Exception { + Path root = new Path("/" + volumeName + "/" + bucketName); + Path testKeyPath = new Path(root, "testKey"); + createKeyWithECReplicationConfiguration(cluster.getConf(), testKeyPath); + + OzoneKeyDetails key = getKey(testKeyPath, false); + assertEquals(HddsProtos.ReplicationType.EC, + key.getReplicationConfig().getReplicationType()); + assertEquals("rs-3-2-1024k", + key.getReplicationConfig().getReplication()); + } + @Test public void testDeleteCreatesFakeParentDir() throws Exception { Path grandparent = new Path("/testDeleteCreatesFakeParentDir"); @@ -610,6 +624,100 @@ public void testListStatus() throws Exception { assertEquals(3, fileStatuses.length, "FileStatus did not return all children of the directory"); } + @Test + public void testObjectOwner() throws Exception { + // Save the old user, and switch to the old user after test + UserGroupInformation oldUser = UserGroupInformation.getCurrentUser(); + try { + // user1 create file /file1 + // user2 create directory /dir1 + // user3 create file /dir1/file2 + UserGroupInformation user1 = UserGroupInformation + .createUserForTesting("user1", new String[] {"user1"}); + UserGroupInformation user2 = UserGroupInformation + .createUserForTesting("user2", new String[] {"user2"}); + UserGroupInformation user3 = UserGroupInformation + .createUserForTesting("user3", new String[] {"user3"}); + Path root = new Path("/"); + Path file1 = new Path(root, "file1"); + Path dir1 = new Path(root, "dir1"); + Path file2 = new Path(dir1, "file2"); + FileStatus[] fileStatuses = o3fs.listStatus(root); + assertEquals(0, fileStatuses.length); + + UserGroupInformation.setLoginUser(user1); + fs = FileSystem.get(cluster.getConf()); + ContractTestUtils.touch(fs, file1); + UserGroupInformation.setLoginUser(user2); + fs = FileSystem.get(cluster.getConf()); + fs.mkdirs(dir1); + UserGroupInformation.setLoginUser(user3); + fs = FileSystem.get(cluster.getConf()); + ContractTestUtils.touch(fs, file2); + + assertEquals(2, o3fs.listStatus(root).length); + assertEquals(1, o3fs.listStatus(dir1).length); + assertEquals(user1.getShortUserName(), + fs.getFileStatus(file1).getOwner()); + assertEquals(user2.getShortUserName(), + fs.getFileStatus(dir1).getOwner()); + 
assertEquals(user3.getShortUserName(), + fs.getFileStatus(file2).getOwner()); + } finally { + UserGroupInformation.setLoginUser(oldUser); + fs = FileSystem.get(cluster.getConf()); + } + } + + @Test + public void testObjectProxyUser() throws Exception { + // Save the old user, and switch to the old user after test + UserGroupInformation oldUser = UserGroupInformation.getCurrentUser(); + try { + // user1ProxyUser create file /file1 + // user2ProxyUser create directory /dir1 + // user3ProxyUser create file /dir1/file2 + String proxyUserName = "proxyuser"; + UserGroupInformation proxyuser = UserGroupInformation + .createUserForTesting(proxyUserName, new String[] {"user1"}); + Path root = new Path("/"); + Path file1 = new Path(root, "file1"); + Path dir1 = new Path(root, "dir1"); + Path file2 = new Path(dir1, "file2"); + + UserGroupInformation user1ProxyUser = + UserGroupInformation.createProxyUser("user1", proxyuser); + UserGroupInformation user2ProxyUser = + UserGroupInformation.createProxyUser("user2", proxyuser); + UserGroupInformation user3ProxyUser = + UserGroupInformation.createProxyUser("user3", proxyuser); + FileStatus[] fileStatuses = o3fs.listStatus(root); + assertEquals(0, fileStatuses.length); + + UserGroupInformation.setLoginUser(user1ProxyUser); + fs = FileSystem.get(cluster.getConf()); + ContractTestUtils.touch(fs, file1); + UserGroupInformation.setLoginUser(user2ProxyUser); + fs = FileSystem.get(cluster.getConf()); + fs.mkdirs(dir1); + UserGroupInformation.setLoginUser(user3ProxyUser); + fs = FileSystem.get(cluster.getConf()); + ContractTestUtils.touch(fs, file2); + + assertEquals(2, o3fs.listStatus(root).length); + assertEquals(1, o3fs.listStatus(dir1).length); + assertEquals(user1ProxyUser.getShortUserName(), + fs.getFileStatus(file1).getOwner()); + assertEquals(user2ProxyUser.getShortUserName(), + fs.getFileStatus(dir1).getOwner()); + assertEquals(user3ProxyUser.getShortUserName(), + fs.getFileStatus(file2).getOwner()); + } finally { + UserGroupInformation.setLoginUser(oldUser); + fs = FileSystem.get(cluster.getConf()); + } + } + @Test public void testListStatusWithIntermediateDir() throws Exception { assumeFalse(FILE_SYSTEM_OPTIMIZED.equals(getBucketLayout())); @@ -622,6 +730,7 @@ public void testListStatusWithIntermediateDir() throws Exception { .setAcls(Collections.emptyList()) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setLocationInfoList(new ArrayList<>()) + .setOwnerName("user" + RandomStringUtils.randomNumeric(5)) .build(); OpenKeySession session = writeClient.openKey(keyArgs); @@ -658,6 +767,8 @@ public void testListStatusWithIntermediateDirWithECEnabled() .setAcls(Collections.emptyList()) .setReplicationConfig(new ECReplicationConfig(3, 2)) .setLocationInfoList(new ArrayList<>()) + .setOwnerName( + UserGroupInformation.getCurrentUser().getShortUserName()) .build(); OpenKeySession session = writeClient.openKey(keyArgs); writeClient.commitKey(keyArgs, session.getId()); @@ -2141,4 +2252,26 @@ private void assertCounter(long value, String key) { assertEquals(value, statistics.getLong(key).longValue()); } + @Test + void testSnapshotRead() throws Exception { + // Init data + Path snapPath1 = fs.createSnapshot(new Path("/"), "snap1"); + + Path file1 = new Path("/key1"); + Path file2 = new Path("/key2"); + ContractTestUtils.touch(fs, file1); + ContractTestUtils.touch(fs, file2); + Path snapPath2 = fs.createSnapshot(new Path("/"), "snap2"); + + Path file3 = new Path("/key3"); + ContractTestUtils.touch(fs, file3); + Path snapPath3 = 
fs.createSnapshot(new Path("/"), "snap3"); + + FileStatus[] f1 = fs.listStatus(snapPath1); + FileStatus[] f2 = fs.listStatus(snapPath2); + FileStatus[] f3 = fs.listStatus(snapPath3); + assertEquals(0, f1.length); + assertEquals(2, f2.length); + assertEquals(3, f3.length); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java index f0ff1ab43b49..49afba7b3803 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -497,14 +496,14 @@ public void testCreateFile() throws Exception { @Test public void testLeaseRecoverable() throws Exception { // Create a file - Path parent = new Path("/d1/d2/"); + Path parent = new Path("/d1"); Path source = new Path(parent, "file1"); LeaseRecoverable fs = (LeaseRecoverable)getFs(); FSDataOutputStream stream = getFs().create(source); try { // file not visible yet - assertThrows(OMException.class, () -> fs.isFileClosed(source)); + assertThrows(FileNotFoundException.class, () -> fs.isFileClosed(source)); stream.write(1); stream.hsync(); // file is visible and open diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index d44342acc432..a092890ae2a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.ozone.OzoneAcl; @@ -70,6 +71,9 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.DistCpOptions; +import org.apache.hadoop.tools.mapred.CopyMapper; import org.apache.hadoop.util.ToolRunner; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; @@ -78,6 +82,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -88,9 +94,9 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; -import 
java.util.BitSet; import java.util.Collection; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; @@ -109,6 +115,7 @@ import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; +import static org.apache.hadoop.fs.ozone.OzoneFileSystemTests.createKeyWithECReplicationConfiguration; import static org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec.RS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -122,7 +129,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.DELETE; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -321,6 +327,19 @@ void testCreateDoesNotAddParentDirKeys() throws Exception { fs.delete(grandparent, true); } + @Test + public void testCreateKeyWithECReplicationConfig() throws Exception { + String testKeyName = "testKey"; + Path testKeyPath = new Path(bucketPath, testKeyName); + createKeyWithECReplicationConfiguration(cluster.getConf(), testKeyPath); + + OzoneKeyDetails key = getKey(testKeyPath, false); + assertEquals(HddsProtos.ReplicationType.EC, + key.getReplicationConfig().getReplicationType()); + assertEquals("rs-3-2-1024k", + key.getReplicationConfig().getReplication()); + } + @Test void testListStatusWithIntermediateDirWithECEnabled() throws Exception { @@ -1183,21 +1202,15 @@ void testSharedTmpDir() throws IOException { ACLType userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access // ACL admin owner, world read+write - BitSet aclRights = new BitSet(); - aclRights.set(READ.ordinal()); - aclRights.set(WRITE.ordinal()); - List objectAcls = new ArrayList<>(); - objectAcls.add(new OzoneAcl(ACLIdentityType.WORLD, "", - aclRights, ACCESS)); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); + EnumSet aclRights = EnumSet.of(READ, WRITE); // volume acls have all access to admin and read+write access to world // Construct VolumeArgs - VolumeArgs volumeArgs = new VolumeArgs.Builder() + VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setAdmin("admin") .setOwner("admin") - .setAcls(Collections.unmodifiableList(objectAcls)) + .addAcl(new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, aclRights)) + .addAcl(new OzoneAcl(ACLIdentityType.USER, "admin", ACCESS, userRights)) .setQuotaInNamespace(1000) .setQuotaInBytes(Long.MAX_VALUE).build(); // Sanity check @@ -1228,20 +1241,11 @@ void testSharedTmpDir() throws IOException { } // set acls for shared tmp mount under the tmp volume - objectAcls.clear(); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); - aclRights.clear(DELETE.ordinal()); - aclRights.set(LIST.ordinal()); - objectAcls.add(new OzoneAcl(ACLIdentityType.WORLD, "", - aclRights, ACCESS)); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); // bucket acls have 
all access to admin and read+write+list access to world - BucketArgs bucketArgs = new BucketArgs.Builder() .setOwner("admin") - .setAcls(Collections.unmodifiableList(objectAcls)) + .addAcl(new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, READ, WRITE, LIST)) + .addAcl(new OzoneAcl(ACLIdentityType.USER, "admin", ACCESS, userRights)) .setQuotaInNamespace(1000) .setQuotaInBytes(Long.MAX_VALUE).build(); @@ -1301,10 +1305,10 @@ void testTempMount() throws IOException { ACLType userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", - userRights, ACCESS); + ACCESS, userRights); // Construct VolumeArgs - VolumeArgs volumeArgs = new VolumeArgs.Builder() - .setAcls(Collections.singletonList(aclWorldAccess)) + VolumeArgs volumeArgs = VolumeArgs.newBuilder() + .addAcl(aclWorldAccess) .setQuotaInNamespace(1000).build(); // Sanity check assertNull(volumeArgs.getOwner()); @@ -2302,10 +2306,10 @@ void testNonPrivilegedUserMkdirCreateBucket() throws IOException { ACLType userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", - userRights, ACCESS); + ACCESS, userRights); // Construct VolumeArgs, set ACL to world access - VolumeArgs volumeArgs = new VolumeArgs.Builder() - .setAcls(Collections.singletonList(aclWorldAccess)) + VolumeArgs volumeArgs = VolumeArgs.newBuilder() + .addAcl(aclWorldAccess) .build(); proxy.createVolume(volume, volumeArgs); @@ -2336,6 +2340,20 @@ private void createLinkBucket(String linkVolume, String linkBucket, ozoneVolume.createBucket(linkBucket, builder.build()); } + private Path createAndGetBucketPath() + throws IOException { + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setStorageType(StorageType.DISK); + builder.setBucketLayout(bucketLayout); + BucketArgs omBucketArgs = builder.build(); + String vol = UUID.randomUUID().toString(); + String buck = UUID.randomUUID().toString(); + final OzoneBucket bucket = + TestDataUtil.createVolumeAndBucket(client, vol, buck, omBucketArgs); + Path volume = new Path(OZONE_URI_DELIMITER, bucket.getVolumeName()); + return new Path(volume, bucket.getName()); + } + @Test void testSnapshotRead() throws Exception { if (useOnlyCache) { @@ -2477,4 +2495,113 @@ void testSetTimes() throws Exception { assertEquals(mtime, fileStatus.getModificationTime()); } + @Test + public void testSetTimesForLinkedBucketPath() throws Exception { + // Create a file + OzoneBucket sourceBucket = + TestDataUtil.createVolumeAndBucket(client, bucketLayout); + Path volumePath1 = + new Path(OZONE_URI_DELIMITER, sourceBucket.getVolumeName()); + Path sourceBucketPath = new Path(volumePath1, sourceBucket.getName()); + Path path = new Path(sourceBucketPath, "key1"); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + OzoneVolume sourceVol = client.getObjectStore().getVolume(sourceBucket.getVolumeName()); + String linkBucketName = UUID.randomUUID().toString(); + createLinkBucket(sourceVol.getName(), linkBucketName, + sourceVol.getName(), sourceBucket.getName()); + + Path linkedBucketPath = new Path(volumePath1, linkBucketName); + Path keyInLinkedBucket = new Path(linkedBucketPath, "key1"); + + // test setTimes in linked bucket path + long mtime = 1000; + fs.setTimes(keyInLinkedBucket, mtime, 2000); + + FileStatus fileStatus = fs.getFileStatus(path); + // verify that mtime is updated as expected. 
+ assertEquals(mtime, fileStatus.getModificationTime()); + + long mtimeDontUpdate = -1; + fs.setTimes(keyInLinkedBucket, mtimeDontUpdate, 2000); + + fileStatus = fs.getFileStatus(keyInLinkedBucket); + // verify that mtime is NOT updated as expected. + assertEquals(mtime, fileStatus.getModificationTime()); + } + + @ParameterizedTest(name = "Source Replication Factor = {0}") + @ValueSource(shorts = { 1, 3 }) + public void testDistcp(short sourceRepFactor) throws Exception { + Path srcBucketPath = createAndGetBucketPath(); + Path insideSrcBucket = new Path(srcBucketPath, "*"); + Path dstBucketPath = createAndGetBucketPath(); + // create 2 files on source + List fileNames = createFiles(srcBucketPath, 2, sourceRepFactor); + // Create target directory/bucket + fs.mkdirs(dstBucketPath); + + // perform distcp + DistCpOptions options = + new DistCpOptions.Builder(Collections.singletonList(insideSrcBucket), + dstBucketPath).build(); + options.appendToConf(conf); + Job distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 2, 2); + FileStatus sourceFileStatus = fs.listStatus(srcBucketPath)[0]; + FileStatus dstFileStatus = fs.listStatus(dstBucketPath)[0]; + assertEquals(sourceRepFactor, sourceFileStatus.getReplication()); + // without preserve distcp should create file with default replication + assertEquals(fs.getDefaultReplication(dstBucketPath), + dstFileStatus.getReplication()); + + deleteFiles(dstBucketPath, fileNames); + + // test preserve option + options = + new DistCpOptions.Builder(Collections.singletonList(insideSrcBucket), + dstBucketPath).preserve(DistCpOptions.FileAttribute.REPLICATION) + .build(); + options.appendToConf(conf); + distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 2, 2); + dstFileStatus = fs.listStatus(dstBucketPath)[0]; + // src and dst should have same replication + assertEquals(sourceRepFactor, dstFileStatus.getReplication()); + + // test if copy is skipped due to matching checksums + assertFalse(options.shouldSkipCRC()); + distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 0, 2); + } + + private void verifyCopy(Path dstBucketPath, Job distcpJob, + long expectedFilesToBeCopied, long expectedTotalFilesInDest) throws IOException { + long filesCopied = + distcpJob.getCounters().findCounter(CopyMapper.Counter.COPY).getValue(); + FileStatus[] destinationFileStatus = fs.listStatus(dstBucketPath); + assertEquals(expectedTotalFilesInDest, destinationFileStatus.length); + assertEquals(expectedFilesToBeCopied, filesCopied); + } + + private List createFiles(Path srcBucketPath, int fileCount, short factor) throws IOException { + List createdFiles = new ArrayList<>(); + for (int i = 1; i <= fileCount; i++) { + String keyName = "key" + RandomStringUtils.randomNumeric(5); + Path file = new Path(srcBucketPath, keyName); + try (FSDataOutputStream fsDataOutputStream = fs.create(file, factor)) { + fsDataOutputStream.writeBytes("Hello"); + } + createdFiles.add(keyName); + } + return createdFiles; + } + + private void deleteFiles(Path base, List fileNames) throws IOException { + for (String key : fileNames) { + fs.delete(new Path(base, key)); + } + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java index 4d35d863acf9..9389a4fdbd08 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java @@ -22,7 +22,6 @@ import org.apache.hadoop.fs.LeaseRecoverable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.junit.jupiter.api.Test; @@ -30,6 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.FileNotFoundException; import java.io.IOException; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -217,7 +217,7 @@ void testLeaseRecoverable() throws Exception { LeaseRecoverable fs = (LeaseRecoverable)getFs(); FSDataOutputStream stream = getFs().create(source); try { - assertThrows(OMException.class, () -> fs.isFileClosed(source)); + assertThrows(FileNotFoundException.class, () -> fs.isFileClosed(source)); stream.write(1); stream.hsync(); assertFalse(fs.isFileClosed(source)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java index d729251267ea..47c584e048a6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import java.io.IOException; @@ -30,6 +31,8 @@ import java.util.TreeSet; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -95,4 +98,17 @@ private static void listStatusIterator(FileSystem subject, assertEquals(total, iCount); } + + static void createKeyWithECReplicationConfiguration(OzoneConfiguration inputConf, Path keyPath) + throws IOException { + OzoneConfiguration conf = new OzoneConfiguration(inputConf); + conf.set(OZONE_REPLICATION, "rs-3-2-1024k"); + conf.set(OZONE_REPLICATION_TYPE, "EC"); + URI uri = FileSystem.getDefaultUri(conf); + conf.setBoolean( + String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); + try (FileSystem fileSystem = FileSystem.get(uri, conf)) { + ContractTestUtils.touch(fileSystem, keyPath); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index 87f114bd7115..382f4b72034c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -55,13 +55,14 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -126,15 +127,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @Test @@ -534,16 +533,14 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + + return count.get() == expectedCount; } private void checkPath(Path path) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index a8c450e3cc99..13cae7bff92a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -25,7 +25,10 @@ import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; @@ -33,27 +36,27 @@ import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.CryptoOutputStream; import org.apache.hadoop.crypto.Encryptor; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import 
org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; -import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -69,10 +72,14 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.ECKeyOutputStream; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; +import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; @@ -81,9 +88,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest; -import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestWithFSO; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.service.OpenKeyCleanupService; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; import org.apache.ozone.test.GenericTestUtils; @@ -104,14 +109,22 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; +import static org.apache.hadoop.ozone.TestDataUtil.cleanupDeletedTable; +import static org.apache.hadoop.ozone.TestDataUtil.cleanupOpenKeyTable; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LEASE_HARD_LIMIT; +import static org.assertj.core.api.Assertions.assertThat; import static 
org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -143,6 +156,10 @@ public class TestHSync { private static final int FLUSH_SIZE = 2 * CHUNK_SIZE; private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE; private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE; + private static final int SERVICE_INTERVAL = 100; + private static final int EXPIRE_THRESHOLD_MS = 140; + + private static OpenKeyCleanupService openKeyCleanupService; @BeforeAll public static void init() throws Exception { @@ -151,10 +168,21 @@ public static void init() throws Exception { CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); // Reduce KeyDeletingService interval CONF.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + CONF.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); CONF.setBoolean("ozone.client.incremental.chunk.list", true); + CONF.setBoolean("ozone.client.stream.putblock.piggybacking", true); CONF.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true); + CONF.setTimeDuration(OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL, + SERVICE_INTERVAL, TimeUnit.MILLISECONDS); + CONF.setTimeDuration(OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD, + EXPIRE_THRESHOLD_MS, TimeUnit.MILLISECONDS); + CONF.setTimeDuration(OZONE_OM_LEASE_HARD_LIMIT, + EXPIRE_THRESHOLD_MS, TimeUnit.MILLISECONDS); + CONF.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(BLOCK_SIZE) .setChunkSize(CHUNK_SIZE) @@ -167,7 +195,6 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -176,11 +203,15 @@ public static void init() throws Exception { bucket = TestDataUtil.createVolumeAndBucket(client, layout); // Enable DEBUG level logging for relevant classes - GenericTestUtils.setLogLevel(OMKeyRequest.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(OMKeyCommitRequest.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(OMKeyCommitRequestWithFSO.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(BlockManagerImpl.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(AbstractDatanodeStore.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockOutputStream.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(KeyValueHandler.LOG, Level.DEBUG); + + openKeyCleanupService = + (OpenKeyCleanupService) cluster.getOzoneManager().getKeyManager().getOpenKeyCleanupService(); + openKeyCleanupService.suspend(); } @AfterAll @@ -391,6 +422,65 @@ public void testHSyncDeletedKey() throws Exception { } } + @Test + public void testHSyncOpenKeyDeletionWhileDeleteDirectory() throws Exception { + // Verify that when directory is deleted recursively hsync related openKeys should be deleted, + + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName() + OZONE_URI_DELIMITER + "dir1/dir2"; + final Path key1 = new 
Path(dir, "hsync-key"); + + try (FileSystem fs = FileSystem.get(CONF)) { + // Create key1 + try (FSDataOutputStream os = fs.create(key1, true)) { + os.write(1); + os.hsync(); + // There should be 1 key in openFileTable + assertEquals(1, getOpenKeyInfo(BUCKET_LAYOUT).size()); + // Delete directory recursively + fs.delete(new Path(OZONE_ROOT + bucket.getVolumeName() + OZONE_URI_DELIMITER + + bucket.getName() + OZONE_URI_DELIMITER + "dir1/"), true); + + // Verify that DELETED_HSYNC_KEY metadata is added to the open key + GenericTestUtils.waitFor(() -> { + List<OmKeyInfo> omKeyInfo = getOpenKeyInfo(BUCKET_LAYOUT); + return omKeyInfo.size() > 0 && omKeyInfo.get(0).getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY); + }, 1000, 12000); + + // Resume openKeyCleanupService + openKeyCleanupService.resume(); + + // Verify entry from openKey gets deleted eventually + GenericTestUtils.waitFor(() -> + 0 == getOpenKeyInfo(BUCKET_LAYOUT).size(), 1000, 12000); + } catch (OMException ex) { + assertEquals(OMException.ResultCodes.DIRECTORY_NOT_FOUND, ex.getResult()); + } finally { + openKeyCleanupService.suspend(); + } + } + } + + private List<OmKeyInfo> getOpenKeyInfo(BucketLayout bucketLayout) { + List<OmKeyInfo> omKeyInfo = new ArrayList<>(); + + Table<String, OmKeyInfo> openFileTable = + cluster.getOzoneManager().getMetadataManager().getOpenKeyTable(bucketLayout); + try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> + iterator = openFileTable.iterator()) { + while (iterator.hasNext()) { + omKeyInfo.add(iterator.next().getValue()); + } + } catch (Exception e) { + // ignore and return whatever entries were collected + } + return omKeyInfo; + } + @Test public void testUncommittedBlocks() throws Exception { // Set the fs.defaultFS @@ -405,7 +495,7 @@ public void testUncommittedBlocks() throws Exception { ThreadLocalRandom.current().nextBytes(data); try (FileSystem fs = FileSystem.get(CONF)) { - final Path file = new Path(dir, "file"); + final Path file = new Path(dir, "file-hsync-uncommitted-blocks"); try (FSDataOutputStream outputStream = fs.create(file, true)) { outputStream.hsync(); outputStream.write(data); @@ -459,7 +549,7 @@ public void testHsyncKeyCallCount() throws Exception { final byte[] data = new byte[128]; ThreadLocalRandom.current().nextBytes(data); - final Path file = new Path(dir, "file-hsync-then-close"); + final Path file = new Path(dir, "file-hsync"); try (FileSystem fs = FileSystem.get(CONF)) { long fileSize = 0; try (FSDataOutputStream outputStream = fs.create(file, true)) { @@ -548,7 +638,8 @@ static void runTestHSync(FileSystem fs, Path file, break; } for (int i = 0; i < n; i++) { - assertEquals(data[offset + i], buffer[i]); + assertEquals(data[offset + i], buffer[i], + "expected at offset " + offset + " i=" + i); } offset += n; } @@ -875,4 +966,275 @@ public void writeWithBigBuffer(boolean incrementalChunkList, int bufferSize) } bucket.deleteKey(keyName); } + + @Test + public void testNormalKeyOverwriteHSyncKey() throws Exception { + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + + // Expect empty OpenKeyTable before key creation + OzoneManager ozoneManager = cluster.getOzoneManager(); + cleanupDeletedTable(ozoneManager); + cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT); + OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); + Table<String, OmKeyInfo> openKeyTable = metadataManager.getOpenKeyTable(BUCKET_LAYOUT); + Table<String, RepeatedOmKeyInfo> deletedTable =
metadataManager.getDeletedTable(); + assertTrue(openKeyTable.isEmpty()); + assertTrue(deletedTable.isEmpty()); + ozoneManager.getKeyManager().getDeletingService().suspend(); + OMMetrics metrics = ozoneManager.getMetrics(); + metrics.incDataCommittedBytes(-metrics.getDataCommittedBytes()); + assertEquals(0, metrics.getDataCommittedBytes()); + OzoneVolume volume = client.getObjectStore().getVolume(bucket.getVolumeName()); + OzoneBucket ozoneBucket = volume.getBucket(bucket.getName()); + long usedBytes = ozoneBucket.getUsedBytes(); + + String data1 = "data for normal file"; + String data2 = "data for hsynced file"; + final Path file = new Path(dir, "file-normal-overwrite-hsync"); + try (FileSystem fs = FileSystem.get(CONF)) { + // create hsync key + FSDataOutputStream outputStream1 = fs.create(file, true); + outputStream1.write(data2.getBytes(UTF_8), 0, data2.length()); + outputStream1.hsync(); + // write more data + String s = RandomStringUtils.randomAlphabetic(BLOCK_SIZE); + byte[] newData = s.getBytes(StandardCharsets.UTF_8); + outputStream1.write(newData); + + // create normal key and commit + FSDataOutputStream outputStream2 = fs.create(file, true); + outputStream2.write(data1.getBytes(UTF_8), 0, data1.length()); + outputStream2.close(); + assertEquals(data1.length(), metrics.getDataCommittedBytes()); + + // hsync call for overwritten hsync key, should fail + OMException omException = assertThrows(OMException.class, () -> outputStream1.hsync()); + assertTrue(omException.getResult() == OMException.ResultCodes.KEY_NOT_FOUND); + assertTrue(omException.getMessage().contains("already deleted/overwritten")); + + // allocate new block for overwritten hsync key, should fail + IOException ioException = assertThrows(IOException.class, () -> outputStream1.write(newData)); + assertTrue(ioException.getCause() instanceof OMException); + assertTrue(((OMException)ioException.getCause()).getResult() == OMException.ResultCodes.KEY_NOT_FOUND); + assertTrue(ioException.getMessage().contains("already deleted/overwritten")); + + // recoverLease will succeed since the key was already committed by outputStream2 + ((RootedOzoneFileSystem)fs).recoverLease(file); + + Map<String, OmKeyInfo> openKeys = getAllOpenKeys(openKeyTable); + Map<String, RepeatedOmKeyInfo> deletedKeys = getAllDeletedKeys(deletedTable); + // outputStream1 still has one open key left in the openKeyTable. It will be cleaned up by OpenKeyCleanupService later.
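+ // Two cleanup paths apply to the overwritten hsync key: the data it already persisted is tracked in the deletedTable + // for block reclamation, while its stale openKeyTable entry is only removed by OpenKeyCleanupService. That is why, + // once the service is resumed further below, the test waits for the openKeyTable to drain and the deletedTable to reach two entries.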
+ assertEquals(1, openKeys.size()); + // outputStream1's has one delete key record in deletedTable + assertEquals(1, deletedKeys.size()); + + // final file will have data1 content + OzoneKeyDetails keyInfo = bucket.getKey(file.getName()); + try (OzoneInputStream is = bucket.readKey(file.getName())) { + ByteBuffer readBuffer = ByteBuffer.allocate((int) keyInfo.getDataSize()); + int readLen = is.read(readBuffer); + assertEquals(keyInfo.getDataSize(), readLen); + assertArrayEquals(data1.getBytes(UTF_8), readBuffer.array()); + } + + // verify bucket info + ozoneBucket = volume.getBucket(bucket.getName()); + assertEquals(keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes, + ozoneBucket.getUsedBytes()); + + // Resume openKeyCleanupService + openKeyCleanupService.resume(); + // Verify entry from openKey gets deleted eventually + GenericTestUtils.waitFor(() -> { + try { + return getAllOpenKeys(openKeyTable).size() == 0 && getAllDeletedKeys(deletedTable).size() == 2; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 100, 5000); + } finally { + cleanupDeletedTable(ozoneManager); + cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT); + ozoneManager.getKeyManager().getDeletingService().resume(); + openKeyCleanupService.suspend(); + } + } + + @Test + public void testHSyncKeyOverwriteNormalKey() throws Exception { + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + + // Expect empty OpenKeyTable before key creation + OzoneManager ozoneManager = cluster.getOzoneManager(); + cleanupDeletedTable(ozoneManager); + cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT); + OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); + Table openKeyTable = metadataManager.getOpenKeyTable(BUCKET_LAYOUT); + Table deletedTable = metadataManager.getDeletedTable(); + assertTrue(openKeyTable.isEmpty()); + assertTrue(deletedTable.isEmpty()); + ozoneManager.getKeyManager().getDeletingService().suspend(); + OMMetrics metrics = ozoneManager.getMetrics(); + metrics.incDataCommittedBytes(-metrics.getDataCommittedBytes()); + assertEquals(0, metrics.getDataCommittedBytes()); + OzoneVolume volume = client.getObjectStore().getVolume(bucket.getVolumeName()); + OzoneBucket ozoneBucket = volume.getBucket(bucket.getName()); + long usedBytes = ozoneBucket.getUsedBytes(); + + String data1 = "data for normal file"; + String data2 = "data for hsynced file"; + final Path file = new Path(dir, "file-hsync-overwrite-normal"); + try (FileSystem fs = FileSystem.get(CONF)) { + // create and commit normal key + FSDataOutputStream outputStream1 = fs.create(file, true); + outputStream1.write(data1.getBytes(UTF_8), 0, data1.length()); + outputStream1.close(); + assertEquals(data1.length(), metrics.getDataCommittedBytes()); + + // create hsync key and commit + FSDataOutputStream outputStream2 = fs.create(file, true); + outputStream2.write(data2.getBytes(UTF_8), 0, data2.length()); + outputStream2.hsync(); + outputStream2.close(); + assertEquals(data1.length() + data2.length(), metrics.getDataCommittedBytes()); + + Map openKeys = getAllOpenKeys(openKeyTable); + Map deletedKeys = getAllDeletedKeys(deletedTable); + // There should be no key in openKeyTable + assertEquals(0, openKeys.size()); + // There should be one key in delete table + 
assertEquals(1, deletedKeys.size()); + + // final file will have data2 content + OzoneKeyDetails keyInfo = bucket.getKey(file.getName()); + try (OzoneInputStream is = bucket.readKey(file.getName())) { + ByteBuffer readBuffer = ByteBuffer.allocate((int) keyInfo.getDataSize()); + int readLen = is.read(readBuffer); + assertEquals(keyInfo.getDataSize(), readLen); + assertArrayEquals(data2.getBytes(UTF_8), readBuffer.array()); + } + + // verify bucket info + ozoneBucket = volume.getBucket(bucket.getName()); + assertEquals(keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes, + ozoneBucket.getUsedBytes()); + } finally { + cleanupDeletedTable(ozoneManager); + cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT); + ozoneManager.getKeyManager().getDeletingService().resume(); + } + } + + @Test + public void testHSyncKeyOverwriteHSyncKey() throws Exception { + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + + // Expect empty OpenKeyTable before key creation + OzoneManager ozoneManager = cluster.getOzoneManager(); + cleanupDeletedTable(ozoneManager); + cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT); + OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); + Table<String, OmKeyInfo> openKeyTable = metadataManager.getOpenKeyTable(BUCKET_LAYOUT); + Table<String, RepeatedOmKeyInfo> deletedTable = metadataManager.getDeletedTable(); + assertTrue(openKeyTable.isEmpty()); + assertTrue(deletedTable.isEmpty()); + ozoneManager.getKeyManager().getDeletingService().suspend(); + OMMetrics metrics = ozoneManager.getMetrics(); + metrics.incDataCommittedBytes(-metrics.getDataCommittedBytes()); + assertEquals(0, metrics.getDataCommittedBytes()); + OzoneVolume volume = client.getObjectStore().getVolume(bucket.getVolumeName()); + OzoneBucket ozoneBucket = volume.getBucket(bucket.getName()); + long usedBytes = ozoneBucket.getUsedBytes(); + + String data1 = "data for first hsynced file"; + String data2 = "data for second hsynced file"; + final Path file = new Path(dir, "file-hsync-overwrite-hsync"); + try (FileSystem fs = FileSystem.get(CONF)) { + // create first hsync key and call hsync + FSDataOutputStream outputStream1 = fs.create(file, true); + outputStream1.write(data1.getBytes(UTF_8), 0, data1.length()); + outputStream1.hsync(); + + // create second hsync key and call hsync + FSDataOutputStream outputStream2 = fs.create(file, true); + outputStream2.write(data2.getBytes(UTF_8), 0, data2.length()); + outputStream2.hsync(); + + // closing the first hsync key should fail + OMException omException = assertThrows(OMException.class, () -> outputStream1.close()); + assertTrue(omException.getResult() == OMException.ResultCodes.KEY_NOT_FOUND); + assertTrue(omException.getMessage().contains("already deleted/overwritten")); + + // hsync/close of the second hsync key should succeed + outputStream2.hsync(); + outputStream2.close(); + + Map<String, OmKeyInfo> openKeys = getAllOpenKeys(openKeyTable); + Map<String, RepeatedOmKeyInfo> deletedKeys = getAllDeletedKeys(deletedTable); + // outputStream1 still has one open key left in the openKeyTable. It will be cleaned up by OpenKeyCleanupService later.
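+ // Once outputStream2 hsync'ed the same path, OM considered outputStream1's version overwritten, which is why the + // close() above fails with KEY_NOT_FOUND ("already deleted/overwritten"); only outputStream2's committed key survives.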
+ assertEquals(1, openKeys.size()); + // outputStream1's has one delete key record in deletedTable + assertEquals(1, deletedKeys.size()); + + // final file will have data2 content + OzoneKeyDetails keyInfo = bucket.getKey(file.getName()); + try (OzoneInputStream is = bucket.readKey(file.getName())) { + ByteBuffer readBuffer = ByteBuffer.allocate((int) keyInfo.getDataSize()); + int readLen = is.read(readBuffer); + assertEquals(keyInfo.getDataSize(), readLen); + assertArrayEquals(data2.getBytes(UTF_8), readBuffer.array()); + } + + // verify bucket info + ozoneBucket = volume.getBucket(bucket.getName()); + assertEquals(keyInfo.getDataSize() * keyInfo.getReplicationConfig().getRequiredNodes() + usedBytes, + ozoneBucket.getUsedBytes()); + } finally { + cleanupDeletedTable(ozoneManager); + cleanupOpenKeyTable(ozoneManager, BUCKET_LAYOUT); + ozoneManager.getKeyManager().getDeletingService().resume(); + } + } + + private Map getAllOpenKeys(Table table) throws IOException { + Map keys = new HashMap(); + try (TableIterator> tableIter = table.iterator()) { + while (tableIter.hasNext()) { + Table.KeyValue kv = tableIter.next(); + String key = kv.getKey(); + keys.put(key, kv.getValue()); + } + } + return keys; + } + + private Map getAllDeletedKeys(Table table) throws IOException { + Map keys = new HashMap(); + try (TableIterator> tableIter = table.iterator()) { + while (tableIter.hasNext()) { + Table.KeyValue kv = tableIter.next(); + String key = kv.getKey(); + keys.put(key, kv.getValue()); + } + } + return keys; + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index 4b45bb5fa0d1..494f3d5ca2b0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -53,14 +53,18 @@ import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.event.Level; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; import java.net.ConnectException; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_READ_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; @@ -116,9 +120,11 @@ public void init() throws IOException, InterruptedException, conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); // make sure flush will write data to DN conf.setBoolean("ozone.client.stream.buffer.flush.delay", false); + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) @@ -131,7 +137,6 @@ public void init() 
throws IOException, InterruptedException, cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -190,6 +195,42 @@ public void testRecovery(int dataSize) throws Exception { verifyData(data, dataSize * 2, file, fs); } + @Test + public void testRecoveryWithoutHsyncHflushOnLastBlock() throws Exception { + RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); + + int blockSize = (int) cluster.getOzoneManager().getConfiguration().getStorageSize( + OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); + + final byte[] data = getData(blockSize / 2 + 1); + + final FSDataOutputStream stream = fs.create(file, true); + try { + stream.write(data); + stream.hsync(); + assertFalse(fs.isFileClosed(file)); + + // It will write into new block as well + // Don't do hsync/flush + stream.write(data); + + int count = 0; + while (count++ < 15 && !fs.recoverLease(file)) { + Thread.sleep(1000); + } + // The lease should have been recovered. + assertTrue(fs.isFileClosed(file), "File should be closed"); + + // A second call to recoverLease should succeed too. + assertTrue(fs.recoverLease(file)); + } finally { + closeIgnoringKeyNotFound(stream); + } + + // open it again, make sure the data is correct + verifyData(data, blockSize / 2 + 1, file, fs); + } + @Test public void testOBSRecoveryShouldFail() throws Exception { // Set the fs.defaultFS @@ -224,6 +265,7 @@ public void testFinalizeBlockFailure() throws Exception { stream.flush(); FaultInjectorImpl injector = new FaultInjectorImpl(); + injector.setType(ContainerProtos.Type.FinalizeBlock); KeyValueHandler.setInjector(injector); StorageContainerException sce = new StorageContainerException( "Requested operation not allowed as ContainerState is CLOSED", @@ -316,6 +358,7 @@ public void testGetCommittedBlockLengthTimeout(boolean forceRecovery) throws Exc OzoneTestUtils.closeContainer(cluster.getStorageContainerManager(), container); // pause getCommittedBlockLength handling on all DNs to make sure all getCommittedBlockLength will time out FaultInjectorImpl injector = new FaultInjectorImpl(); + injector.setType(ContainerProtos.Type.GetCommittedBlockLength); KeyValueHandler.setInjector(injector); GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(XceiverClientGrpc.getLogger()); @@ -438,7 +481,7 @@ public void testRecoverWrongFile() throws Exception { stream.hsync(); assertFalse(fs.isFileClosed(file)); - assertThrows(OMException.class, () -> fs.recoverLease(notExistFile)); + assertThrows(FileNotFoundException.class, () -> fs.recoverLease(notExistFile)); } finally { closeIgnoringKeyNotFound(stream); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index c6893c57e969..78c4bf4961d9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.ozone; import java.io.BufferedInputStream; +import java.io.EOFException; import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -50,14 +51,18 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.OMConfigKeys; import 
org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Test OzoneFSInputStream by reading through multiple interfaces. @@ -162,6 +167,124 @@ public void testO3FSSingleByteRead() throws IOException { } } + @Test + public void testByteBufferPositionedRead() throws IOException { + try (FSDataInputStream inputStream = fs.open(filePath)) { + int bufferCapacity = 20; + ByteBuffer buffer = ByteBuffer.allocate(bufferCapacity); + long currentPos = inputStream.getPos(); + // Read positional data from 50th index + int position = 50; + int readBytes = inputStream.read(position, buffer); + + // File position should not be changed after positional read + assertEquals(currentPos, inputStream.getPos()); + // Total read bytes should be equal to bufferCapacity + // As file has more data than bufferCapacity + assertEquals(readBytes, bufferCapacity); + byte[] value1 = new byte[readBytes]; + System.arraycopy(buffer.array(), 0, value1, 0, readBytes); + byte[] value2 = new byte[readBytes]; + System.arraycopy(data, position, value2, 0, readBytes); + // Verify input and positional read data + assertArrayEquals(value1, value2, "value mismatch"); + buffer.clear(); + + // Read positional from 8th index again using same inputStream + position = 8; + readBytes = inputStream.read(position, buffer); + assertEquals(currentPos, inputStream.getPos()); + assertEquals(readBytes, bufferCapacity); + byte[] value3 = new byte[readBytes]; + System.arraycopy(buffer.array(), 0, value3, 0, readBytes); + byte[] value4 = new byte[readBytes]; + System.arraycopy(data, position, value4, 0, readBytes); + // Verify input and positional read data + assertArrayEquals(value3, value4, "value mismatch"); + + // Buffer size more than actual data, still read should succeed + ByteBuffer buffer1 = ByteBuffer.allocate(30 * 1024 * 1024 * 2); + // Read positional from 12th index + position = 12; + readBytes = inputStream.read(position, buffer1); + assertEquals(currentPos, inputStream.getPos()); + // Total read bytes should be (total file bytes - position) as buffer is not filled completely + assertEquals(readBytes, 30 * 1024 * 1024 - position); + + byte[] value5 = new byte[readBytes]; + System.arraycopy(buffer1.array(), 0, value5, 0, readBytes); + byte[] value6 = new byte[readBytes]; + System.arraycopy(data, position, value6, 0, readBytes); + // Verify input and positional read data + assertArrayEquals(value5, value6, "value mismatch"); + } + } + + @ParameterizedTest + @ValueSource(ints = { -1, 30 * 1024 * 1024, 30 * 1024 * 1024 + 1 }) + public void testByteBufferPositionedReadWithInvalidPosition(int position) throws IOException { + try (FSDataInputStream inputStream = fs.open(filePath)) { + long currentPos = inputStream.getPos(); + ByteBuffer buffer = ByteBuffer.allocate(20); + assertEquals(-1, inputStream.read(position, buffer)); + // File position should not be changed + assertEquals(currentPos, inputStream.getPos()); + } + } + + @Test + public void testByteBufferPositionedReadFully() throws IOException { + 
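+ // Unlike read(position, buffer) above, readFully must either fill the buffer completely or throw EOFException, + // and it must leave the stream's current position unchanged.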
try (FSDataInputStream inputStream = fs.open(filePath)) { + int bufferCapacity = 20; + long currentPos = inputStream.getPos(); + ByteBuffer buffer = ByteBuffer.allocate(bufferCapacity); + // Read positional data from 50th index + int position = 50; + inputStream.readFully(position, buffer); + // File position should not be changed after positional readFully + assertEquals(currentPos, inputStream.getPos()); + // Make sure buffer is full after readFully + Assertions.assertThat((!buffer.hasRemaining())); + + byte[] value1 = new byte[bufferCapacity]; + System.arraycopy(buffer.array(), 0, value1, 0, bufferCapacity); + byte[] value2 = new byte[bufferCapacity]; + System.arraycopy(data, position, value2, 0, bufferCapacity); + // Verify input and positional read data + assertArrayEquals(value1, value2, "value mismatch"); + buffer.clear(); + + // Read positional from 8th index again using same inputStream + position = 8; + inputStream.readFully(position, buffer); + assertEquals(currentPos, inputStream.getPos()); + Assertions.assertThat((!buffer.hasRemaining())); + byte[] value3 = new byte[bufferCapacity]; + System.arraycopy(buffer.array(), 0, value3, 0, bufferCapacity); + byte[] value4 = new byte[bufferCapacity]; + System.arraycopy(data, position, value4, 0, bufferCapacity); + // Verify input and positional read data + assertArrayEquals(value3, value4, "value mismatch"); + + // Buffer size is more than actual data, readFully should fail in this case + ByteBuffer buffer1 = ByteBuffer.allocate(30 * 1024 * 1024 * 2); + assertThrows(EOFException.class, () -> inputStream.readFully(12, buffer1)); + assertEquals(currentPos, inputStream.getPos()); + } + } + + @ParameterizedTest + @ValueSource(ints = { -1, 30 * 1024 * 1024, 30 * 1024 * 1024 + 1 }) + public void testByteBufferPositionedReadFullyWithInvalidPosition(int position) throws IOException { + try (FSDataInputStream inputStream = fs.open(filePath)) { + long currentPos = inputStream.getPos(); + ByteBuffer buffer = ByteBuffer.allocate(20); + assertThrows(EOFException.class, () -> inputStream.readFully(position, buffer)); + // File position should not be changed + assertEquals(currentPos, inputStream.getPos()); + } + } + @Test public void testO3FSMultiByteRead() throws IOException { try (FSDataInputStream inputStream = fs.open(filePath)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 6dccd604208f..a41dcd80acdc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -48,6 +49,7 @@ import java.io.FileNotFoundException; import java.net.URI; +import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -57,6 +59,8 @@ import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; import 
static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.assertj.core.api.Assertions.assertThat; @@ -293,10 +297,13 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete() // This should succeed, as we check during creation of part or during // complete MPU. + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(MD5_HASH) + .digest(b)).toLowerCase()); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); // Should fail, as we have directory with same name. OMException ex = assertThrows(OMException.class, () -> ozoneBucket.completeMultipartUpload(keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 2a6c8c456b9c..059f7b3e03d3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -44,7 +44,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; @@ -82,11 +83,12 @@ public static void init() throws Exception { final int blockSize = 2 * maxFlushSize; final BucketLayout layout = BucketLayout.FILE_SYSTEM_OPTIMIZED; - CONF.setBoolean(DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + CONF.setBoolean(HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); CONF.setBoolean(OZONE_FS_DATASTREAM_ENABLED, true); CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); + CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -100,7 +102,6 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 47dc9ac0c3ba..4f14ede8fa52 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -72,7 +72,7 @@ public class TestOzoneFsHAURLs { TestOzoneFsHAURLs.class); private OzoneConfiguration conf; - private static MiniOzoneCluster cluster; + private static MiniOzoneHAClusterImpl cluster; private static String omServiceId; private static OzoneManager om; private static int numOfOMs; @@ 
-107,12 +107,11 @@ static void initClass(@TempDir File tempDir) throws Exception { conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); // Start the cluster - cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .build(); + .setNumDatanodes(5); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(omServiceId, conf); @@ -161,8 +160,7 @@ public static void shutdown() { * @return the leader OM's RPC address in the MiniOzoneHACluster */ private String getLeaderOMNodeAddr() { - MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; - OzoneManager omLeader = haCluster.getOMLeader(); + OzoneManager omLeader = cluster.getOMLeader(); assertNotNull(omLeader, "There should be a leader OM at this point."); String omNodeId = omLeader.getOMNodeId(); // omLeaderAddrKey=ozone.om.address.omServiceId.omNodeId diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index ae6a24a910cf..8e0bd1ac7deb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -24,12 +24,14 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -47,9 +49,11 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -66,12 +70,13 @@ class TestOzoneFsSnapshot { private static final String OM_SERVICE_ID = "om-service-test1"; private static OzoneManager ozoneManager; private static OzoneFsShell shell; + private static AtomicInteger counter = new AtomicInteger(); private static final String VOLUME = - "vol-" + RandomStringUtils.randomNumeric(5); + "vol-" + counter.incrementAndGet(); private static final String BUCKET = - "buck-" + RandomStringUtils.randomNumeric(5); + "buck-" + counter.incrementAndGet(); private static final String KEY = - "key-" + RandomStringUtils.randomNumeric(5); + "key-" + counter.incrementAndGet(); 
private static final String BUCKET_PATH = OM_KEY_PREFIX + VOLUME + OM_KEY_PREFIX + BUCKET; private static final String BUCKET_WITH_SNAPSHOT_INDICATOR_PATH = @@ -84,9 +89,11 @@ static void initClass() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); // Start the cluster - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(1) .build(); @@ -128,7 +135,7 @@ private static void createVolBuckKey() @Test void testCreateSnapshotDuplicateName() throws Exception { - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); int res = ToolRunner.run(shell, new String[]{"-createSnapshot", BUCKET_PATH, snapshotName}); @@ -152,7 +159,7 @@ void testCreateSnapshotWithSubDirInput() throws Exception { // rather than: // Created snapshot ofs://om/vol1/buck2/dir3/.snapshot/snap1 - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); String dirPath = BUCKET_PATH + "/dir1/"; @@ -257,7 +264,7 @@ void testCreateSnapshotFailure(String description, */ @Test void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { - String key1 = "key-" + RandomStringUtils.randomNumeric(5); + String key1 = "key-" + counter.incrementAndGet(); String newKeyPath = BUCKET_PATH + OM_KEY_PREFIX + key1; // Pause SnapshotDeletingService so that Snapshot marked deleted is not reclaimed. 
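+ // OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL is set to 1 second in initClass, so without suspending the service a + // snapshot marked deleted could be reclaimed before the listing assertions below run.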
ozoneManager.getKeyManager().getSnapshotDeletingService().suspend(); @@ -274,7 +281,7 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { String snapshotPath1 = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snapshotName1; - String key2 = "key-" + RandomStringUtils.randomNumeric(5); + String key2 = "key-" + counter.incrementAndGet(); String newKeyPath2 = BUCKET_PATH + OM_KEY_PREFIX + key2; execShellCommandAndGetOutput(0, new String[]{"-put", tempFile.toString(), newKeyPath2}); @@ -413,6 +420,64 @@ void testSnapshotDeleteFailure(String description, assertThat(errorMessage).contains(expectedMessage); } + @Test + public void testSnapshotReuseSnapName() throws Exception { + String key1 = "key-" + counter.incrementAndGet(); + int res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key1}); + assertEquals(0, res); + + String snap1 = "snap" + counter.incrementAndGet(); + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap1}); + // Asserts that create request succeeded + assertEquals(0, res); + + String listSnapOut = execShellCommandAndGetOutput(0, + new String[]{"-ls", BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snap1}); + assertThat(listSnapOut).contains(key1); + + res = ToolRunner.run(shell, + new String[]{"-deleteSnapshot", BUCKET_PATH, snap1}); + // Asserts that delete request succeeded + assertEquals(0, res); + + GenericTestUtils.waitFor(() -> { + try { + return !ozoneManager.getMetadataManager().getSnapshotInfoTable() + .isExist(SnapshotInfo.getTableKey(VOLUME, BUCKET, snap1)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 200, 10000); + + String key2 = "key-" + counter.incrementAndGet(); + res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key2}); + assertEquals(0, res); + String snap2 = "snap" + counter.incrementAndGet(); + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap2}); + // Asserts that create request succeeded + assertEquals(0, res); + + String key3 = "key-" + counter.incrementAndGet(); + res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key3}); + assertEquals(0, res); + + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap1}); + // Asserts that create request succeeded + assertEquals(0, res); + + listSnapOut = execShellCommandAndGetOutput(0, + new String[]{"-ls", BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snap1}); + assertThat(listSnapOut).contains(key1); + assertThat(listSnapOut).contains(key2); + assertThat(listSnapOut).contains(key3); + } + /** * Execute a shell command with provided arguments * and return a string of the output. 
@@ -453,7 +518,7 @@ private String execShellCommandAndGetOutput( } private String createSnapshot() throws Exception { - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); // Create snapshot int res = ToolRunner.run(shell, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java index 074a8e7df4ba..de3358685ec4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java @@ -47,9 +47,9 @@ import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; -import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.fs.ozone.TestDirectoryDeletingServiceWithFSO.assertSubPathsCount; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; @@ -58,10 +58,10 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; /** * Directory deletion service test cases using rooted ozone filesystem @@ -128,15 +128,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), false); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @Test @@ -227,16 +225,13 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } private static BucketLayout getFSOBucketLayout() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java index ef46ec99d717..76685169011b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java @@ -62,7 +62,7 @@ class TestSafeMode { static void setup() { OzoneConfiguration conf = new OzoneConfiguration(); clusterProvider = new MiniOzoneClusterProvider( - conf, MiniOzoneCluster.newBuilder(conf), 2); + 
MiniOzoneCluster.newBuilder(conf), 2); } @BeforeEach diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java index ab1736c3b0bc..b79c9a870e4e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.contract.AbstractContractCreateTest; import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; +import org.apache.hadoop.fs.contract.AbstractContractLeaseRecoveryTest; import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; import org.apache.hadoop.fs.contract.AbstractContractOpenTest; import org.apache.hadoop.fs.contract.AbstractContractRenameTest; @@ -33,6 +34,7 @@ import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -46,6 +48,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.assertj.core.api.Assumptions.assumeThat; /** @@ -312,4 +315,45 @@ protected AbstractFSContract createContract(Configuration conf) { } } + @Nested + class TestContractLeaseRecovery extends AbstractContractLeaseRecoveryTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return createOzoneContract(conf); + } + + @Override + protected Configuration createConfiguration() { + return createOzoneConfig(); + } + + @Override + @Test + public void testLeaseRecovery() throws Throwable { + assumeThat(getContract().getConf().get(OZONE_DEFAULT_BUCKET_LAYOUT, + BucketLayout.FILE_SYSTEM_OPTIMIZED.name())) + .isEqualTo(BucketLayout.FILE_SYSTEM_OPTIMIZED.name()); + super.testLeaseRecovery(); + } + + @Override + @Test + public void testLeaseRecoveryFileNotExist() throws Throwable { + assumeThat(getContract().getConf().get(OZONE_DEFAULT_BUCKET_LAYOUT, + BucketLayout.FILE_SYSTEM_OPTIMIZED.name())) + .isEqualTo(BucketLayout.FILE_SYSTEM_OPTIMIZED.name()); + super.testLeaseRecoveryFileNotExist(); + } + + @Override + @Test + public void testLeaseRecoveryFileOnDirectory() throws Throwable { + assumeThat(getContract().getConf().get(OZONE_DEFAULT_BUCKET_LAYOUT, + BucketLayout.FILE_SYSTEM_OPTIMIZED.name())) + .isEqualTo(BucketLayout.FILE_SYSTEM_OPTIMIZED.name()); + super.testLeaseRecoveryFileOnDirectory(); + } + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java index 30c4e4cd5b4d..87728f6ce101 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java @@ -31,6 +31,8 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -188,6 +190,23 @@ public void testReadWriteWithBCSId() throws Exception { assertEquals("data123", readData); xceiverClientManager.releaseClient(client, false); } + + @Test + public void testEcho() throws Exception { + ContainerWithPipeline container = + storageContainerLocationClient.allocateContainer( + SCMTestUtils.getReplicationType(ozoneConfig), + HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE); + XceiverClientSpi client = xceiverClientManager + .acquireClient(container.getPipeline()); + ContainerProtocolCalls.createContainer(client, + container.getContainerInfo().getContainerID(), null); + ByteString byteString = UnsafeByteOperations.unsafeWrap(new byte[0]); + ContainerProtos.EchoResponseProto response = + ContainerProtocolCalls.echo(client, "", container.getContainerInfo().getContainerID(), byteString, 1, 0, true); + assertEquals(1, response.getPayload().size()); + xceiverClientManager.releaseClient(client, false); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java index 688d13ad361b..9db501edb721 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java @@ -84,7 +84,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs) .setNumOfStorageContainerManagers(numOfSCMs).setNumOfActiveSCMs(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java index 6f0bd40dde0e..2829ba234ca0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; import org.apache.ozone.test.GenericTestUtils; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -132,7 +132,7 @@ public void testLeaderIdAfterLeaderChange() throws Exception { dnToStop.get().stop(); // wait long enough based on leader election min 
timeout Thread.sleep(4000 * conf.getTimeDuration( - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 5, TimeUnit.SECONDS)); GenericTestUtils.waitFor(() -> { try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java index 90f8375f829b..4ac44315556c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -85,9 +85,9 @@ public void setup() throws Exception { StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(4) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java index c62d0d24140f..400c4868a99e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -126,7 +125,6 @@ private DBCheckpoint downloadSnapshot() throws Exception { } @Test - @Flaky("HDDS-6116") public void testInstallCheckPoint() throws Exception { DBCheckpoint checkpoint = downloadSnapshot(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java index 0aa2599637a9..10492736144b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java @@ -94,7 +94,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfOzoneManagers(numOfOMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java index f7a3aa9c9b7b..4cfc64cd4f50 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java @@ -118,15 
+118,16 @@ public void init() throws Exception { conf.set(HDDS_SECRET_KEY_ROTATE_DURATION, ROTATE_DURATION_MS + "ms"); conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, EXPIRY_DURATION_MS + "ms"); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder .setSCMServiceId("TestSecretKeySnapshot") .setSCMServiceId("SCMServiceId") - .setNumDatanodes(1) .setNumOfStorageContainerManagers(3) .setNumOfActiveSCMs(2) - .setNumOfOzoneManagers(1); + .setNumOfOzoneManagers(1) + .setNumDatanodes(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java index eb2442cd0988..6af43c3bacde 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java @@ -326,13 +326,12 @@ public void testSecretKeyWithoutAuthorization() throws Exception { private void startCluster(int numSCMs) throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKey") - .setNumDatanodes(3) .setNumOfStorageContainerManagers(numSCMs) .setNumOfOzoneManagers(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index e973c842de44..95d7faa91740 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -20,8 +20,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.HddsUtils; @@ -64,20 +62,30 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.server.events.FixedThreadPoolWithAffinityExecutor; import org.apache.hadoop.hdds.utils.HddsVersionInfo; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.StaticMapping; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneTestUtils; +import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import 
org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.states.endpoint.HeartbeatEndpointTask; import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; +import org.apache.hadoop.ozone.container.metadata.DatanodeStore; +import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; @@ -94,11 +102,12 @@ import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.server.RaftServerConfigKeys; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.mockito.ArgumentMatcher; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -106,11 +115,9 @@ import java.io.File; import java.io.IOException; import java.nio.file.Path; -import java.nio.file.Paths; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -119,6 +126,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Semaphore; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -128,14 +136,16 @@ import java.util.stream.Stream; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState; import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser; +import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; 
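The hunks below replace manual cluster.shutdown() calls in finally blocks with try-with-resources. A minimal sketch of the resulting test shape, assuming MiniOzoneCluster is AutoCloseable as the try-with-resources usage in this patch implies; the method name is hypothetical:

  @Test
  public void testAgainstFreshCluster() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // per-test tuning goes on conf before the cluster is built
    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS);
    try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build()) {
      cluster.waitForClusterToBeReady();
      // exercise SCM / OM against the running cluster
    }
    // close() tears the cluster down, so no explicit cluster.shutdown() in a finally block is needed
  }

The same pattern is applied to testRpcPermissionWithConf, testBlockDeletionTransactions, testOldDNRegistersToReInitialisedSCM and testBlockDeletingThrottling in the hunks that follow.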
@@ -144,7 +154,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; @@ -158,9 +167,10 @@ */ @Timeout(900) public class TestStorageContainerManager { + private static final String LOCALHOST_IP = "127.0.0.1"; private static XceiverClientManager xceiverClientManager; private static final Logger LOG = LoggerFactory.getLogger( - TestStorageContainerManager.class); + TestStorageContainerManager.class); @BeforeAll public static void setup() throws IOException { @@ -199,16 +209,13 @@ private void testRpcPermissionWithConf( OzoneConfiguration ozoneConf, Predicate isAdmin, String... usernames) throws Exception { - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build(); - try { + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build()) { cluster.waitForClusterToBeReady(); for (String username : usernames) { testRpcPermission(cluster, username, !isAdmin.test(username)); } - } finally { - cluster.shutdown(); - } + } // The cluster is automatically closed here } private void testRpcPermission(MiniOzoneCluster cluster, @@ -227,17 +234,17 @@ private void testRpcPermission(MiniOzoneCluster cluster, assertInstanceOf(ContainerNotFoundException.class, ex); } - try { - ContainerWithPipeline container2 = mockClientServer.allocateContainer( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE); - if (expectPermissionDenied) { - fail("Operation should fail, expecting an IOException here."); - } else { - assertEquals(1, container2.getPipeline().getNodes().size()); - } - } catch (Exception e) { - verifyPermissionDeniedException(e, fakeRemoteUsername); + if (expectPermissionDenied) { + Exception allocateException = assertThrows(Exception.class, () -> + mockClientServer.allocateContainer(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE)); + verifyPermissionDeniedException(allocateException, fakeRemoteUsername); + } else { + // If not expecting permission denied, validate the successful operation's result + ContainerWithPipeline container2 = assertDoesNotThrow(() -> + mockClientServer.allocateContainer(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE)); + assertEquals(1, container2.getPipeline().getNodes().size()); } Exception e = assertThrows(Exception.class, () -> mockClientServer.getContainer( @@ -290,20 +297,14 @@ public void testBlockDeletionTransactions() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(50) - .build(); - cluster.waitForClusterToBeReady(); - - try { + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .build()) { + cluster.waitForClusterToBeReady(); DeletedBlockLog delLog = cluster.getStorageContainerManager() .getScmBlockManager().getDeletedBlockLog(); assertEquals(0, delLog.getNumOfValidTransactions()); - // Create {numKeys} random names keys. 
- TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(numKeys, 4096); + Map keyLocations = TestDataUtil.createKeys(cluster, numKeys); // Wait for container report Thread.sleep(1000); for (OmKeyInfo keyInfo : keyLocations.values()) { @@ -312,7 +313,7 @@ public void testBlockDeletionTransactions() throws Exception { } Map> containerBlocks = createDeleteTXLog( cluster.getStorageContainerManager(), - delLog, keyLocations, helper); + delLog, keyLocations, cluster, conf); // Verify a few TX gets created in the TX log. assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); @@ -333,7 +334,7 @@ public void testBlockDeletionTransactions() throws Exception { return false; } }, 1000, 22000); - assertTrue(helper.verifyBlocksWithTxnTable(containerBlocks)); + assertTrue(verifyBlocksWithTxnTable(cluster, conf, containerBlocks)); // Continue the work, add some TXs that with known container names, // but unknown block IDs. for (Long containerID : containerBlocks.keySet()) { @@ -363,21 +364,21 @@ public void testBlockDeletionTransactions() throws Exception { return false; } }, 1000, 20000); - } finally { - cluster.shutdown(); } } @Test public void testOldDNRegistersToReInitialisedSCM() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setHbInterval(1000) - .setHbProcessorInterval(3000).setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); + + - try { + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(1) + .build()) { + cluster.waitForClusterToBeReady(); HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0); StorageContainerManager scm = cluster.getStorageContainerManager(); scm.stop(); @@ -443,8 +444,6 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { assertThat(versionEndPointTaskLog.getOutput()).contains( "org.apache.hadoop.ozone.common" + ".InconsistentStorageStateException: Mismatched ClusterIDs"); - } finally { - cluster.shutdown(); } } @@ -462,16 +461,14 @@ public void testBlockDeletingThrottling() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(3000) + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); - - try { + .build()) { + cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); DeletedBlockLog delLog = cluster.getStorageContainerManager() .getScmBlockManager().getDeletedBlockLog(); assertEquals(0, delLog.getNumOfValidTransactions()); @@ -483,10 +480,7 @@ public void testBlockDeletingThrottling() throws Exception { .getScmBlockManager().getSCMBlockDeletingService(); delService.setBlockDeleteTXNum(limitSize); - // Create {numKeys} random names 
keys. - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(numKeys, 4096); + Map keyLocations = TestDataUtil.createKeys(cluster, numKeys); // Wait for container report Thread.sleep(5000); for (OmKeyInfo keyInfo : keyLocations.values()) { @@ -495,7 +489,7 @@ public void testBlockDeletingThrottling() throws Exception { } createDeleteTXLog(cluster.getStorageContainerManager(), - delLog, keyLocations, helper); + delLog, keyLocations, cluster, conf); // Verify a few TX gets created in the TX log. assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); @@ -516,16 +510,13 @@ public void testBlockDeletingThrottling() throws Exception { } return false; }, 500, 10000); - } finally { - cluster.shutdown(); } } private Map> createDeleteTXLog( StorageContainerManager scm, DeletedBlockLog delLog, - Map keyLocations, - TestStorageContainerManagerHelper helper) + Map keyLocations, MiniOzoneCluster cluster, OzoneConfiguration conf) throws IOException, TimeoutException { // These keys will be written into a bunch of containers, // gets a set of container names, verify container containerBlocks @@ -544,7 +535,7 @@ private Map> createDeleteTXLog( } assertThat(totalCreatedBlocks).isGreaterThan(0); assertEquals(totalCreatedBlocks, - helper.getAllBlocks(containerNames).size()); + getAllBlocks(cluster, conf, containerNames).size()); // Create a deletion TX for each key. Map> containerBlocks = Maps.newHashMap(); @@ -568,11 +559,9 @@ private Map> createDeleteTXLog( } @Test - public void testSCMInitialization() throws Exception { + public void testSCMInitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); UUID clusterId = UUID.randomUUID(); @@ -590,13 +579,11 @@ public void testSCMInitialization() throws Exception { } @Test - public void testSCMInitializationWithHAEnabled() throws Exception { + public void testSCMInitializationWithHAEnabled(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); final UUID clusterId = UUID.randomUUID(); @@ -608,54 +595,26 @@ public void testSCMInitializationWithHAEnabled() throws Exception { } @Test - public void testSCMReinitialization() throws Exception { + public void testSCMReinitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); //This will set the cluster id in the version file - MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); - cluster.getStorageContainerManager().stop(); - try { + + + try (MiniOzoneCluster cluster = + 
MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build()) { + cluster.waitForClusterToBeReady(); + cluster.getStorageContainerManager().stop(); final UUID clusterId = UUID.randomUUID(); // This will initialize SCM StorageContainerManager.scmInit(conf, clusterId.toString()); SCMStorageConfig scmStore = new SCMStorageConfig(conf); assertNotEquals(clusterId.toString(), scmStore.getClusterID()); assertTrue(scmStore.isSCMHAEnabled()); - } finally { - cluster.shutdown(); } } - // Unsupported Test case. Non Ratis SCM -> Ratis SCM not supported - //@Test - public void testSCMReinitializationWithHAUpgrade() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - //This will set the cluster id in the version file - final UUID clusterId = UUID.randomUUID(); - // This will initialize SCM - - StorageContainerManager.scmInit(conf, clusterId.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - assertEquals(clusterId.toString(), scmStore.getClusterID()); - assertFalse(scmStore.isSCMHAEnabled()); - - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - StorageContainerManager.scmInit(conf, clusterId.toString()); - scmStore = new SCMStorageConfig(conf); - assertTrue(scmStore.isSCMHAEnabled()); - validateRatisGroupExists(conf, clusterId.toString()); - - } - @VisibleForTesting public static void validateRatisGroupExists(OzoneConfiguration conf, String clusterId) throws IOException { @@ -696,45 +655,10 @@ public static void validateRatisGroupExists(OzoneConfiguration conf, } } - // Non Ratis SCM -> Ratis SCM is not supported {@see HDDS-6695} - // Invalid Testcase - // @Test - public void testSCMReinitializationWithHAEnabled() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - //This will set the cluster id in the version file - MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); - try { - final String clusterId = - cluster.getStorageContainerManager().getClusterId(); - // validate there is no ratis group pre existing - assertThrows(IOException.class, () -> validateRatisGroupExists(conf, clusterId)); - - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - // This will re-initialize SCM - StorageContainerManager.scmInit(conf, clusterId); - cluster.getStorageContainerManager().start(); - // Ratis group with cluster id exists now - validateRatisGroupExists(conf, clusterId); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - assertTrue(scmStore.isSCMHAEnabled()); - } finally { - cluster.shutdown(); - } - } - @Test - void testSCMInitializationFailure() { + void testSCMInitializationFailure(@TempDir Path tempDir) { OzoneConfiguration conf = new OzoneConfiguration(); - final String path = - GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); Exception e = assertThrows(SCMException.class, () -> HddsTestUtils.getScmSimple(conf)); 
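The testSCM* hunks above replace GenericTestUtils.getTempPath(...) plus manual cleanup with JUnit 5 @TempDir injection. A self-contained sketch of that pattern, using a hypothetical test class with the Ozone config wiring shown only as a comment:

    import java.nio.file.Path;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class TempDirSketchTest {
      @Test
      void usesInjectedTempDir(@TempDir Path tempDir) {
        // JUnit creates the directory before the test and deletes it afterwards,
        // so no finally { FileUtils.deleteQuietly(...) } cleanup is needed.
        Path scmMeta = tempDir.resolve("scm-meta");
        // e.g. conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmMeta.toString());
      }
    }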
@@ -742,32 +666,27 @@ void testSCMInitializationFailure() { } @Test - public void testScmInfo() throws Exception { + public void testScmInfo(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final String path = - GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - try { - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); - //Reads the SCM Info from SCM instance - ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); - assertEquals(clusterId, scmInfo.getClusterId()); - assertEquals(scmId, scmInfo.getScmId()); - - String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion(); - String actualVersion = scm.getSoftwareVersion(); - assertEquals(expectedVersion, actualVersion); - } finally { - FileUtils.deleteQuietly(new File(path)); - } + Path scmPath = tempDir.resolve("scm-meta"); + + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); + String clusterId = UUID.randomUUID().toString(); + String scmId = UUID.randomUUID().toString(); + scmStore.setClusterId(clusterId); + scmStore.setScmId(scmId); + // writes the version file properties + scmStore.initialize(); + StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); + //Reads the SCM Info from SCM instance + ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); + assertEquals(clusterId, scmInfo.getClusterId()); + assertEquals(scmId, scmInfo.getScmId()); + + String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion(); + String actualVersion = scm.getSoftwareVersion(); + assertEquals(expectedVersion, actualVersion); } /** @@ -775,21 +694,23 @@ public void testScmInfo() throws Exception { */ @Test public void testScmProcessDatanodeHeartbeat() throws Exception { + String rackName = "/rack1"; OzoneConfiguration conf = new OzoneConfiguration(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); - StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( - Collections.singleton(HddsUtils.getHostName(conf))).get(0), - "/rack1"); + StaticMapping.addNodeToRack(NetUtils.normalizeHostName(HddsUtils.getHostName(conf)), + rackName); + // In case of JDK17, the IP address is resolved to localhost mapped to 127.0.0.1 which is not in sync with JDK8 + // and hence need to make following entry under HDDS-10132 + StaticMapping.addNodeToRack(LOCALHOST_IP, rackName); final int datanodeNum = 3; - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(datanodeNum) - .build(); - cluster.waitForClusterToBeReady(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - try { + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(datanodeNum) + .build()) { + cluster.waitForClusterToBeReady(); + StorageContainerManager scm = cluster.getStorageContainerManager(); // first sleep 10s Thread.sleep(10000); // verify datanode heartbeats are well processed @@ -809,8 +730,6 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { datanodeInfo.getNetworkName()); 
assertEquals("/rack1", datanodeInfo.getNetworkLocation()); } - } finally { - cluster.shutdown(); } } @@ -826,20 +745,16 @@ public void testCloseContainerCommandOnRestart() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(3000) + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); - - try { - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); + .build()) { + cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); - helper.createKeys(10, 4096); + TestDataUtil.createKeys(cluster, 10); GenericTestUtils.waitFor(() -> cluster.getStorageContainerManager().getContainerManager() .getContainers() != null, 1000, 10000); @@ -905,8 +820,6 @@ public void testCloseContainerCommandOnRestart() throws Exception { } else { verify(nodeManager).addDatanodeCommand(dnUuid, closeContainerCommand); } - } finally { - cluster.shutdown(); } } @@ -926,7 +839,7 @@ public void testContainerReportQueueWithDrop() throws Exception { ContainerReportHandler containerReportHandler = mock(ContainerReportHandler.class); doAnswer((inv) -> { - Thread.currentThread().sleep(500); + Thread.sleep(500); return null; }).when(containerReportHandler).onMessage(dndata, eventQueue); List executors = FixedThreadPoolWithAffinityExecutor @@ -948,7 +861,7 @@ public void testContainerReportQueueWithDrop() throws Exception { eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata); assertThat(containerReportExecutors.droppedEvents()).isGreaterThan(1); - Thread.currentThread().sleep(1000); + Thread.sleep(1000); assertEquals(containerReportExecutors.droppedEvents() + containerReportExecutors.scheduledEvents(), containerReportExecutors.queuedEvents()); @@ -968,7 +881,7 @@ public void testContainerReportQueueTakingMoreTime() throws Exception { ContainerReportHandler containerReportHandler = mock(ContainerReportHandler.class); doAnswer((inv) -> { - Thread.currentThread().sleep(1000); + Thread.sleep(1000); semaphore.release(1); return null; }).when(containerReportHandler).onMessage(any(), eq(eventQueue)); @@ -987,7 +900,7 @@ public void testContainerReportQueueTakingMoreTime() throws Exception { reportExecutorMap); containerReportExecutors.setQueueWaitThreshold(800); containerReportExecutors.setExecWaitThreshold(800); - + eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportExecutors, containerReportHandler); ContainerReportsProto report = ContainerReportsProto.getDefaultInstance(); @@ -1026,7 +939,7 @@ public void testIncrementalContainerReportQueue() throws Exception { IncrementalContainerReportHandler icr = mock(IncrementalContainerReportHandler.class); doAnswer((inv) -> { - Thread.currentThread().sleep(500); + Thread.sleep(500); return null; }).when(icr).onMessage(dndata, eventQueue); List executors = FixedThreadPoolWithAffinityExecutor @@ -1048,7 +961,7 @@ public void testIncrementalContainerReportQueue() throws Exception 
{ eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); assertEquals(0, containerReportExecutors.droppedEvents()); - Thread.currentThread().sleep(3000); + Thread.sleep(3000); assertEquals(containerReportExecutors.scheduledEvents(), containerReportExecutors.queuedEvents()); containerReportExecutors.close(); @@ -1072,13 +985,14 @@ public void testNonRatisToRatis() DefaultConfigManager.clearDefaultConfigs(); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); StorageContainerManager.scmInit(conf, cluster.getClusterId()); + conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, getFreePort()); + conf.unset(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); cluster.restartStorageContainerManager(false); final StorageContainerManager ratisSCM = cluster .getStorageContainerManager(); assertNotNull(ratisSCM.getScmHAManager().getRatisServer()); assertTrue(ratisSCM.getScmStorageConfig().isSCMHAEnabled()); - } } @@ -1115,4 +1029,86 @@ public boolean matches(CommandForDatanode cmdRight) { && left.getProto().equals(right.getProto()); } } + + public List getAllBlocks(MiniOzoneCluster cluster, OzoneConfiguration conf, Set containerIDs) + throws IOException { + List allBlocks = Lists.newArrayList(); + for (Long containerID : containerIDs) { + allBlocks.addAll(getAllBlocks(cluster, conf, containerID)); + } + return allBlocks; + } + + public List getAllBlocks(MiniOzoneCluster cluster, + OzoneConfiguration conf, Long containerID) throws IOException { + List allBlocks = Lists.newArrayList(); + KeyValueContainerData cData = getContainerMetadata(cluster, containerID); + try (DBHandle db = BlockUtils.getDB(cData, conf)) { + + List> kvs = + db.getStore().getBlockDataTable() + .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, + cData.containerPrefix(), cData.getUnprefixedKeyFilter()); + + for (Table.KeyValue entry : kvs) { + allBlocks.add(Long.valueOf(DatanodeSchemaThreeDBDefinition + .getKeyWithoutPrefix(entry.getKey()))); + } + } + return allBlocks; + } + + public boolean verifyBlocksWithTxnTable(MiniOzoneCluster cluster, OzoneConfiguration conf, + Map> containerBlocks) + throws IOException { + for (Map.Entry> entry : containerBlocks.entrySet()) { + KeyValueContainerData cData = getContainerMetadata(cluster, entry.getKey()); + try (DBHandle db = BlockUtils.getDB(cData, conf)) { + DatanodeStore ds = db.getStore(); + DatanodeStoreSchemaThreeImpl dnStoreImpl = + (DatanodeStoreSchemaThreeImpl) ds; + List> + txnsInTxnTable = dnStoreImpl.getDeleteTransactionTable() + .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, + cData.containerPrefix()); + List conID = new ArrayList<>(); + for (Table.KeyValue txn : + txnsInTxnTable) { + conID.addAll(txn.getValue().getLocalIDList()); + } + if (!conID.equals(containerBlocks.get(entry.getKey()))) { + return false; + } + } + } + return true; + } + + private KeyValueContainerData getContainerMetadata(MiniOzoneCluster cluster, Long containerID) + throws IOException { + ContainerWithPipeline containerWithPipeline = cluster + .getStorageContainerManager().getClientProtocolServer() + .getContainerWithPipeline(containerID); + + DatanodeDetails dn = + containerWithPipeline.getPipeline().getFirstNode(); + OzoneContainer containerServer = + getContainerServerByDatanodeUuid(cluster, dn.getUuidString()); + KeyValueContainerData containerData = + (KeyValueContainerData) containerServer.getContainerSet() + .getContainer(containerID).getContainerData(); + return containerData; + } + + private 
OzoneContainer getContainerServerByDatanodeUuid(MiniOzoneCluster cluster, String dnUUID) + throws IOException { + for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { + if (dn.getDatanodeDetails().getUuidString().equals(dnUUID)) { + return dn.getDatanodeStateMachine().getContainer(); + } + } + throw new IOException("Unable to get the ozone container " + + "for given datanode ID " + dnUUID); + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java index e62820cfb1d0..2986484d2ad0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java @@ -95,7 +95,7 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_GAP, "1"); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java deleted file mode 100644 index 322b1e65bc68..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java +++ /dev/null @@ -1,198 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.hdds.scm; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; -import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; - - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import org.apache.commons.lang3.RandomStringUtils; - -/** - * A helper class used by {@link TestStorageContainerManager} to generate - * some keys and helps to verify containers and blocks locations. - */ -public class TestStorageContainerManagerHelper { - - private final MiniOzoneCluster cluster; - private final OzoneConfiguration conf; - - public TestStorageContainerManagerHelper(MiniOzoneCluster cluster, - OzoneConfiguration conf) throws IOException { - this.cluster = cluster; - this.conf = conf; - } - - public Map createKeys(int numOfKeys, int keySize) - throws Exception { - Map keyLocationMap = Maps.newHashMap(); - - try (OzoneClient client = cluster.newClient()) { - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client); - // Write 20 keys in bucketName. 
- Set keyNames = Sets.newHashSet(); - for (int i = 0; i < numOfKeys; i++) { - String keyName = RandomStringUtils.randomAlphabetic(5) + i; - keyNames.add(keyName); - - TestDataUtil - .createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5)); - } - - for (String key : keyNames) { - OmKeyArgs arg = new OmKeyArgs.Builder() - .setVolumeName(bucket.getVolumeName()) - .setBucketName(bucket.getName()) - .setKeyName(key) - .build(); - OmKeyInfo location = cluster.getOzoneManager() - .lookupKey(arg); - keyLocationMap.put(key, location); - } - } - - return keyLocationMap; - } - - public List getPendingDeletionBlocks(Long containerID) - throws IOException { - List pendingDeletionBlocks = Lists.newArrayList(); - KeyValueContainerData cData = getContainerMetadata(containerID); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { - KeyPrefixFilter filter = cData.getDeletingBlockKeyFilter(); - - List> kvs = - db.getStore().getBlockDataTable() - .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, - cData.containerPrefix(), filter); - - for (Table.KeyValue entry : kvs) { - pendingDeletionBlocks - .add(entry.getKey().replace(cData.getDeletingBlockKeyPrefix(), "")); - } - } - return pendingDeletionBlocks; - } - - public List getAllBlocks(Set containerIDs) - throws IOException { - List allBlocks = Lists.newArrayList(); - for (Long containerID : containerIDs) { - allBlocks.addAll(getAllBlocks(containerID)); - } - return allBlocks; - } - - public List getAllBlocks(Long containeID) throws IOException { - List allBlocks = Lists.newArrayList(); - KeyValueContainerData cData = getContainerMetadata(containeID); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { - - List> kvs = - db.getStore().getBlockDataTable() - .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, - cData.containerPrefix(), cData.getUnprefixedKeyFilter()); - - for (Table.KeyValue entry : kvs) { - allBlocks.add(Long.valueOf(DatanodeSchemaThreeDBDefinition - .getKeyWithoutPrefix(entry.getKey()))); - } - } - return allBlocks; - } - - public boolean verifyBlocksWithTxnTable(Map> containerBlocks) - throws IOException { - for (Map.Entry> entry : containerBlocks.entrySet()) { - KeyValueContainerData cData = getContainerMetadata(entry.getKey()); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { - DatanodeStore ds = db.getStore(); - DatanodeStoreSchemaThreeImpl dnStoreImpl = - (DatanodeStoreSchemaThreeImpl) ds; - List> - txnsInTxnTable = dnStoreImpl.getDeleteTransactionTable() - .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, - cData.containerPrefix()); - List conID = new ArrayList<>(); - for (Table.KeyValue txn : - txnsInTxnTable) { - conID.addAll(txn.getValue().getLocalIDList()); - } - if (!conID.equals(containerBlocks.get(entry.getKey()))) { - return false; - } - } - } - return true; - } - - private KeyValueContainerData getContainerMetadata(Long containerID) - throws IOException { - ContainerWithPipeline containerWithPipeline = cluster - .getStorageContainerManager().getClientProtocolServer() - .getContainerWithPipeline(containerID); - - DatanodeDetails dn = - containerWithPipeline.getPipeline().getFirstNode(); - OzoneContainer containerServer = - getContainerServerByDatanodeUuid(dn.getUuidString()); - KeyValueContainerData containerData = - (KeyValueContainerData) containerServer.getContainerSet() - .getContainer(containerID).getContainerData(); - return containerData; - } - - private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID) - throws IOException { - for (HddsDatanodeService dn : 
cluster.getHddsDatanodes()) { - if (dn.getDatanodeDetails().getUuidString().equals(dnUUID)) { - return dn.getDatanodeStateMachine().getContainer(); - } - } - throw new IOException("Unable to get the ozone container " - + "for given datanode ID " + dnUUID); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java index fb312dfb5096..99095f55b008 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java @@ -20,13 +20,17 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; + +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; @@ -71,8 +75,8 @@ public void setup() { RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) .setState(Pipeline.PipelineState.CLOSED) .setNodes(dns) + .setNodesInOrder(dnsInOrder) .build(); - pipeline.setNodesInOrder(dnsInOrder); } @Test @@ -174,6 +178,39 @@ public XceiverClientReply sendCommandAsync( assertEquals(1, seenDNs.size()); } + @Test + public void testPrimaryReadFromNormalDatanode() + throws IOException { + final List seenDNs = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + Pipeline randomPipeline = MockPipeline.createRatisPipeline(); + int nodeCount = randomPipeline.getNodes().size(); + assertThat(nodeCount).isGreaterThan(1); + randomPipeline.getNodes().forEach( + node -> assertEquals(NodeOperationalState.IN_SERVICE, node.getPersistedOpState())); + + randomPipeline.getNodes().get( + RandomUtils.nextInt(0, nodeCount)).setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); + randomPipeline.getNodes().get( + RandomUtils.nextInt(0, nodeCount)).setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); + try (XceiverClientGrpc client = new XceiverClientGrpc(randomPipeline, conf) { + @Override + public XceiverClientReply sendCommandAsync( + ContainerProtos.ContainerCommandRequestProto request, + DatanodeDetails dn) { + seenDNs.add(dn); + return buildValidResponse(); + } + }) { + invokeXceiverClientGetBlock(client); + } catch (IOException e) { + e.printStackTrace(); + } + // Always the IN_SERVICE datanode will be read first + assertEquals(NodeOperationalState.IN_SERVICE, seenDNs.get(0).getPersistedOpState()); + } + } + @Test public void testConnectionReusedAfterGetBlock() throws IOException { // With a new Client, make 100 calls. 
On each call, ensure that only one diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java index 5ebf9b56a8ec..100ea9394a9d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java @@ -152,7 +152,7 @@ public static void init() { MiniOzoneCluster.Builder builder = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(DATANODE_COUNT); - clusterProvider = new MiniOzoneClusterProvider(conf, builder, 7); + clusterProvider = new MiniOzoneClusterProvider(builder, 9); } @AfterAll @@ -211,7 +211,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned() final DatanodeDetails toDecommission = nm.getNodeByUuid(dnID.toString()); scmClient.decommissionNodes(Arrays.asList( - getDNHostAndPort(toDecommission))); + getDNHostAndPort(toDecommission)), false); waitForDnToReachOpState(nm, toDecommission, DECOMMISSIONED); // Ensure one node transitioned to DECOMMISSIONING @@ -265,7 +265,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart() waitForAndReturnContainer(ratisRepConfig, 3); final DatanodeDetails dn = getOneDNHostingReplica(getContainerReplicas(container)); - scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dn))); + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dn)), false); // Wait for the state to be persisted on the DN so it can report it on // restart of SCM. @@ -310,6 +310,97 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart() waitForDnToReachPersistedOpState(newDn, IN_SERVICE); } + @Test + // Decommissioning few nodes which leave insufficient nodes for replication + // should not be allowed if the decommissioning is not forced. + public void testInsufficientNodesCannotBeDecommissioned() + throws Exception { + // Generate some data on the empty cluster to create some containers + generateData(20, "key", ratisRepConfig); + + final List toDecommission = nm.getAllNodes(); + + // trying to decommission 5 nodes should leave the cluster with 2 nodes, + // which is not sufficient for RATIS.THREE replication. It should not be allowed. + scmClient.decommissionNodes(Arrays.asList(toDecommission.get(0).getIpAddress(), + toDecommission.get(1).getIpAddress(), toDecommission.get(2).getIpAddress(), + toDecommission.get(3).getIpAddress(), toDecommission.get(4).getIpAddress()), false); + + // Ensure no nodes transitioned to DECOMMISSIONING or DECOMMISSIONED + List decomNodes = nm.getNodes( + DECOMMISSIONING, + HEALTHY); + assertEquals(0, decomNodes.size()); + decomNodes = nm.getNodes( + DECOMMISSIONED, + HEALTHY); + assertEquals(0, decomNodes.size()); + + // Decommission 1 node successfully. 
Cluster is left with 6 IN_SERVICE nodes + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission.get(6))), false); + waitForDnToReachOpState(nm, toDecommission.get(6), DECOMMISSIONED); + waitForDnToReachPersistedOpState(toDecommission.get(6), DECOMMISSIONED); + decomNodes = nm.getNodes( + DECOMMISSIONED, + HEALTHY); + assertEquals(1, decomNodes.size()); + decomNodes = nm.getNodes( + DECOMMISSIONING, + HEALTHY); + assertEquals(0, decomNodes.size()); + + generateData(20, "eckey", ecRepConfig); + // trying to decommission 2 node should leave the cluster with 4 nodes, + // which is not sufficient for EC(3,2) replication. It should not be allowed. + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission.get(5)), + getDNHostAndPort(toDecommission.get(4))), false); + decomNodes = nm.getNodes( + DECOMMISSIONED, + HEALTHY); + assertEquals(1, decomNodes.size()); + decomNodes = nm.getNodes( + DECOMMISSIONING, + HEALTHY); + assertEquals(0, decomNodes.size()); + + // Try to decommission 2 nodes of which 1 has already been decommissioning. Should be successful + // as cluster will be left with (6 - 1) = 5) + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission.get(6)), + getDNHostAndPort(toDecommission.get(5))), false); + waitForDnToReachOpState(nm, toDecommission.get(5), DECOMMISSIONED); + waitForDnToReachPersistedOpState(toDecommission.get(5), DECOMMISSIONED); + decomNodes = nm.getNodes( + DECOMMISSIONED, + HEALTHY); + assertEquals(2, decomNodes.size()); + decomNodes = nm.getNodes( + DECOMMISSIONING, + HEALTHY); + assertEquals(0, decomNodes.size()); + + // Cluster is left with 5 IN_SERVICE nodes, no decommissioning should be allowed + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission.get(4))), false); + decomNodes = nm.getNodes( + DECOMMISSIONED, + HEALTHY); + assertEquals(2, decomNodes.size()); + decomNodes = nm.getNodes( + DECOMMISSIONING, + HEALTHY); + assertEquals(0, decomNodes.size()); + + // Decommissioning with force flag set to true skips the checks. 
So node should transition to DECOMMISSIONING + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(toDecommission.get(4))), true); + decomNodes = nm.getNodes( + DECOMMISSIONED, + HEALTHY); + assertEquals(2, decomNodes.size()); + decomNodes = nm.getNodes( + DECOMMISSIONING, + HEALTHY); + assertEquals(1, decomNodes.size()); + } + @Test // When putting a single node into maintenance, its pipelines should be closed // but no new replicas should be create and the node should transition into @@ -344,7 +435,7 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance() final DatanodeDetails dn = nm.getNodeByUuid(dnID.toString()); scmClient.startMaintenanceNodes(Arrays.asList( - getDNHostAndPort(dn)), 0); + getDNHostAndPort(dn)), 0, true); waitForDnToReachOpState(nm, dn, IN_MAINTENANCE); waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); @@ -415,7 +506,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() scmClient.startMaintenanceNodes(forMaintenance.stream() .map(TestNodeUtil::getDNHostAndPort) - .collect(Collectors.toList()), 0); + .collect(Collectors.toList()), 0, true); // Ensure all 3 DNs go to maintenance for (DatanodeDetails dn : forMaintenance) { @@ -449,7 +540,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() .collect(Collectors.toList()); scmClient.startMaintenanceNodes(ecMaintenance.stream() .map(TestNodeUtil::getDNHostAndPort) - .collect(Collectors.toList()), 0); + .collect(Collectors.toList()), 0, true); for (DatanodeDetails dn : ecMaintenance) { waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); } @@ -483,7 +574,7 @@ public void testEnteringMaintenanceNodeCompletesAfterSCMRestart() scmClient.startMaintenanceNodes(forMaintenance.stream() .map(TestNodeUtil::getDNHostAndPort) - .collect(Collectors.toList()), 0); + .collect(Collectors.toList()), 0, true); // Ensure all 3 DNs go to entering_maintenance for (DatanodeDetails dn : forMaintenance) { @@ -521,7 +612,7 @@ public void testMaintenanceEndsAutomaticallyAtTimeout() DatanodeDetails dn = getOneDNHostingReplica(getContainerReplicas(container)); - scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(dn)), 0); + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(dn)), 0, true); waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); long newEndTime = System.currentTimeMillis() / 1000 + 5; @@ -534,7 +625,7 @@ public void testMaintenanceEndsAutomaticallyAtTimeout() // Put the node back into maintenance and then stop it and wait for it to // go dead - scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(dn)), 0); + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(dn)), 0, true); waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); cluster.shutdownHddsDatanode(dn); waitForDnToReachHealthState(nm, dn, DEAD); @@ -563,7 +654,7 @@ public void testSCMHandlesRestartForMaintenanceNode() DatanodeDetails dn = getOneDNHostingReplica(getContainerReplicas(container)); - scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(dn)), 0); + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(dn)), 0, true); waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); cluster.restartStorageContainerManager(true); @@ -610,6 +701,118 @@ public void testSCMHandlesRestartForMaintenanceNode() assertTrue(counts.isSufficientlyReplicated()); } + @Test + // Putting few nodes into maintenance which leaves insufficient nodes for replication + // should not be allowed if the operation is not forced. 
+ public void testInsufficientNodesCannotBePutInMaintenance() + throws Exception { + // Generate some data on the empty cluster to create some containers + generateData(20, "key", ratisRepConfig); + final List toMaintenance = nm.getAllNodes(); + + // trying to move 6 nodes to maintenance should leave the cluster with 1 node, + // which is not sufficient for RATIS.THREE replication (3 - maintenanceReplicaMinimum = 2). + // It should not be allowed. + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(toMaintenance.get(0)), + getDNHostAndPort(toMaintenance.get(1)), getDNHostAndPort(toMaintenance.get(2)), + getDNHostAndPort(toMaintenance.get(3)), getDNHostAndPort(toMaintenance.get(4)), + getDNHostAndPort(toMaintenance.get(5))), 0, false); + + // Ensure no nodes transitioned to MAINTENANCE + List maintenanceNodes = nm.getNodes( + ENTERING_MAINTENANCE, + HEALTHY); + assertEquals(0, maintenanceNodes.size()); + maintenanceNodes = nm.getNodes( + IN_MAINTENANCE, + HEALTHY); + assertEquals(0, maintenanceNodes.size()); + + // Put 1 node into maintenance successfully. Cluster is left with 6 IN_SERVICE nodes + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(toMaintenance.get(6))), 0, false); + maintenanceNodes = nm.getNodes( + ENTERING_MAINTENANCE, + HEALTHY); + assertEquals(1, maintenanceNodes.size()); + maintenanceNodes = nm.getNodes( + IN_MAINTENANCE, + HEALTHY); + assertEquals(0, maintenanceNodes.size()); + waitForDnToReachOpState(nm, toMaintenance.get(6), IN_MAINTENANCE); + waitForDnToReachPersistedOpState(toMaintenance.get(6), IN_MAINTENANCE); + maintenanceNodes = nm.getNodes( + ENTERING_MAINTENANCE, + HEALTHY); + assertEquals(0, maintenanceNodes.size()); + maintenanceNodes = nm.getNodes( + IN_MAINTENANCE, + HEALTHY); + assertEquals(1, maintenanceNodes.size()); + + generateData(20, "eckey", ecRepConfig); + // trying to put 3 more nodes into maintenance should leave the cluster with 3 nodes, + // which is not sufficient for EC(3,2) replication (3 + maintenanceRemainingRedundancy = 4 DNs required). + // It should not be allowed. + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(toMaintenance.get(5)), + getDNHostAndPort(toMaintenance.get(4)), getDNHostAndPort(toMaintenance.get(3))), 0, false); + maintenanceNodes = nm.getNodes( + ENTERING_MAINTENANCE, + HEALTHY); + assertEquals(0, maintenanceNodes.size()); + maintenanceNodes = nm.getNodes( + IN_MAINTENANCE, + HEALTHY); + assertEquals(1, maintenanceNodes.size()); + + // Try to move 3 nodes of which 1 is already in maintenance to maintenance. 
+ // Should be successful as cluster will be left with (6 - 2) = 4) + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(toMaintenance.get(6)), + getDNHostAndPort(toMaintenance.get(5)), getDNHostAndPort(toMaintenance.get(4))), 0, false); + maintenanceNodes = nm.getNodes( + ENTERING_MAINTENANCE, + HEALTHY); + assertEquals(2, maintenanceNodes.size()); + maintenanceNodes = nm.getNodes( + IN_MAINTENANCE, + HEALTHY); + assertEquals(1, maintenanceNodes.size()); + waitForDnToReachOpState(nm, toMaintenance.get(5), IN_MAINTENANCE); + waitForDnToReachPersistedOpState(toMaintenance.get(5), IN_MAINTENANCE); + waitForDnToReachOpState(nm, toMaintenance.get(4), IN_MAINTENANCE); + waitForDnToReachPersistedOpState(toMaintenance.get(4), IN_MAINTENANCE); + maintenanceNodes = nm.getNodes( + ENTERING_MAINTENANCE, + HEALTHY); + assertEquals(0, maintenanceNodes.size()); + maintenanceNodes = nm.getNodes( + IN_MAINTENANCE, + HEALTHY); + assertEquals(3, maintenanceNodes.size()); + + // Cluster is left with 4 IN_SERVICE nodes, no nodes can be moved to maintenance + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(toMaintenance.get(3))), 0, false); + maintenanceNodes = nm.getNodes( + ENTERING_MAINTENANCE, + HEALTHY); + assertEquals(0, maintenanceNodes.size()); + maintenanceNodes = nm.getNodes( + IN_MAINTENANCE, + HEALTHY); + assertEquals(3, maintenanceNodes.size()); + + // Trying maintenance with force flag set to true skips the checks. + // So node should transition to ENTERING_MAINTENANCE + scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(toMaintenance.get(2))), 0, true); + maintenanceNodes = nm.getNodes( + ENTERING_MAINTENANCE, + HEALTHY); + assertEquals(1, maintenanceNodes.size()); + maintenanceNodes = nm.getNodes( + IN_MAINTENANCE, + HEALTHY); + assertEquals(3, maintenanceNodes.size()); + } + /** * Sets the instance variables to the values for the current MiniCluster. 
*/ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java index e8dc7455a11c..683a0c176eb9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java @@ -32,11 +32,9 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -50,7 +48,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -65,11 +62,7 @@ public class TestQueryNode { @BeforeEach public void setUp() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final int interval = 1000; - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - interval, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS); @@ -77,10 +70,10 @@ public void setUp() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfDatanodes + numOfDatanodes / 2); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfDatanodes) - .setTotalPipelineNumLimit(numOfDatanodes + numOfDatanodes / 2) .build(); cluster.waitForClusterToBeReady(); scmClient = new ContainerOperationClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index 439b563d6330..51b5d84a13e9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -37,9 +37,12 @@ import java.util.Map; import java.util.UUID; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -56,12 +59,12 @@ public class TestLeaderChoosePolicy { public void init(int numDatanodes, int datanodePipelineLimit) throws Exception { conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, datanodePipelineLimit); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, numDatanodes + numDatanodes / 3); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 2000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) - .setHbInterval(2000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java index c73ffb982cf6..d8840436ee0b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.pipeline; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -61,11 +62,11 @@ public static void init() throws Exception { conf.setFromObject(ratisServerConfig); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); conf.set(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, "2s"); + conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(6) - .setHbInterval(1000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 443105b6ccb6..829a9581f663 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -38,8 +38,10 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertThrows; @@ -55,14 +57,14 @@ public class TestRatisPipelineCreateAndDestroy { public void init(int numDatanodes) throws Exception { conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, numDatanodes + numDatanodes / 3); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 2000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration( ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, 500, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) - .setHbInterval(2000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java index 988f163adab5..6ce05ad3be74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java @@ -35,6 +35,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotSame; @@ -60,17 +61,17 @@ public class TestSCMRestart { */ @BeforeAll public static void init() throws Exception { + final int numOfNodes = 4; conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - int numOfNodes = 4; + // allow only one FACTOR THREE pipeline. + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfNodes + 1); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfNodes) - // allow only one FACTOR THREE pipeline. 
- .setTotalPipelineNumLimit(numOfNodes + 1) - .setHbInterval(1000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java index 563e0162acc6..c3ea911f1935 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java @@ -144,7 +144,6 @@ public void init() throws Exception { StorageUnit.MB); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key @@ -210,7 +209,7 @@ public void testReleaseBuffers() throws Exception { return v; }); futures.add(future); - watcher.getFutureMap().put(length, future); + watcher.putFlushFuture(length, future); replies.add(reply); } @@ -238,6 +237,8 @@ public void testReleaseBuffers() throws Exception { assertThat(watcher.getFutureMap()).isEmpty(); assertThat(watcher.getCommitIndexMap()).isEmpty(); } + } finally { + bufferPool.clearBufferPool(); } } @@ -281,7 +282,7 @@ public void testReleaseBuffersOnException() throws Exception { return v; }); futures.add(future); - watcher.getFutureMap().put(length, future); + watcher.putFlushFuture(length, future); replies.add(reply); } @@ -331,6 +332,8 @@ public void testReleaseBuffersOnException() throws Exception { assertThat(watcher.getCommitIndexMap()).isEmpty(); } } + } finally { + bufferPool.clearBufferPool(); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index 3c980f94c59c..b4814d7b5e5d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -136,7 +136,9 @@ public class TestContainerCommandsEC { private static final int EC_CHUNK_SIZE = 1024 * 1024; private static final int STRIPE_DATA_SIZE = EC_DATA * EC_CHUNK_SIZE; private static final int NUM_DN = EC_DATA + EC_PARITY + 3; - private static byte[][] inputChunks = new byte[EC_DATA][EC_CHUNK_SIZE]; + // Data slots are EC_DATA + 1 so we can generate enough data to have a full stripe + // plus one extra chunk. 
+ private static byte[][] inputChunks = new byte[EC_DATA + 1][EC_CHUNK_SIZE]; // Each key size will be in range [min, max), min inclusive, max exclusive private static final int[][] KEY_SIZE_RANGES = @@ -614,12 +616,19 @@ void testECReconstructionCoordinatorWith(List missingIndexes) testECReconstructionCoordinator(missingIndexes, 3); } - @Test - void testECReconstructionWithPartialStripe() - throws Exception { - testECReconstructionCoordinator(ImmutableList.of(4, 5), 1); + @ParameterizedTest + @MethodSource("recoverableMissingIndexes") + void testECReconstructionCoordinatorWithPartialStripe(List missingIndexes) + throws Exception { + testECReconstructionCoordinator(missingIndexes, 1); } + @ParameterizedTest + @MethodSource("recoverableMissingIndexes") + void testECReconstructionCoordinatorWithFullAndPartialStripe(List missingIndexes) + throws Exception { + testECReconstructionCoordinator(missingIndexes, 4); + } static Stream> recoverableMissingIndexes() { return Stream @@ -895,18 +904,19 @@ private void checkBlockData( reconstructedBlockData) { for (int i = 0; i < blockData.length; i++) { + assertEquals(blockData[i].getBlockID(), reconstructedBlockData[i].getBlockID()); + assertEquals(blockData[i].getSize(), reconstructedBlockData[i].getSize()); + assertEquals(blockData[i].getMetadata(), reconstructedBlockData[i].getMetadata()); List oldBlockDataChunks = blockData[i].getChunks(); List newBlockDataChunks = reconstructedBlockData[i].getChunks(); for (int j = 0; j < oldBlockDataChunks.size(); j++) { ContainerProtos.ChunkInfo chunkInfo = oldBlockDataChunks.get(j); - if (chunkInfo.getLen() == 0) { - // let's ignore the empty chunks - continue; - } assertEquals(chunkInfo, newBlockDataChunks.get(j)); } + // Ensure there are no extra chunks in the reconstructed block + assertEquals(oldBlockDataChunks.size(), newBlockDataChunks.size()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index 57e807b7c751..675570164dd9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -20,12 +20,17 @@ import static java.lang.Thread.sleep; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static 
org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_COMPLETE_FINALIZATION; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_POST_FINALIZE_UPGRADE; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_PRE_FINALIZE_UPGRADE; @@ -71,6 +76,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneClusterProvider; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -148,30 +154,31 @@ public static void initClass() { OzoneConfiguration conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.set(OZONE_DATANODE_PIPELINE_LIMIT, "1"); + conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1); + // allow only one FACTOR THREE pipeline. + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, NUM_DATA_NODES + 1); + conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.INITIAL_VERSION.layoutVersion()); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 500, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, TimeUnit.MILLISECONDS); scmFinalizationExecutor = new InjectedUpgradeFinalizationExecutor<>(); SCMConfigurator scmConfigurator = new SCMConfigurator(); scmConfigurator.setUpgradeFinalizationExecutor(scmFinalizationExecutor); - MiniOzoneCluster.Builder builder = - new MiniOzoneHAClusterImpl.Builder(conf) - .setNumDatanodes(NUM_DATA_NODES) - .setNumOfStorageContainerManagers(NUM_SCMS) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setNumOfStorageContainerManagers(NUM_SCMS) .setSCMConfigurator(scmConfigurator) - // allow only one FACTOR THREE pipeline. - .setTotalPipelineNumLimit(NUM_DATA_NODES + 1) - .setHbInterval(500) - .setHbProcessorInterval(500) - .setOmLayoutVersion(OMLayoutFeature.INITIAL_VERSION.layoutVersion()) - .setScmLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) - .setDnLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + .setNumDatanodes(NUM_DATA_NODES) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) + .build()); // Setting the provider to a max of 100 clusters. Some of the tests here // use multiple clusters, so its hard to know exactly how many will be // needed. This means the provider will create 1 extra cluster than needed // but that will not greatly affect runtimes. 
- clusterProvider = new MiniOzoneClusterProvider(conf, builder, 100); + clusterProvider = new MiniOzoneClusterProvider(builder, 100); } @AfterAll diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java index d2ae30efcebc..d5802aab6e02 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.server.upgrade.SCMUpgradeFinalizationContext; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.upgrade.DefaultUpgradeFinalizationExecutor; import org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizationExecutor; @@ -55,6 +56,7 @@ import java.util.stream.Stream; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -87,17 +89,19 @@ public void init(OzoneConfiguration conf, SCMConfigurator configurator = new SCMConfigurator(); configurator.setUpgradeFinalizationExecutor(executor); - MiniOzoneCluster.Builder clusterBuilder = - new MiniOzoneHAClusterImpl.Builder(conf) - .setNumOfStorageContainerManagers(NUM_SCMS) + conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + + MiniOzoneHAClusterImpl.Builder clusterBuilder = MiniOzoneCluster.newHABuilder(conf); + clusterBuilder.setNumOfStorageContainerManagers(NUM_SCMS) .setNumOfActiveSCMs(NUM_SCMS - numInactiveSCMs) - .setScmLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) .setSCMServiceId("scmservice") - .setSCMConfigurator(configurator) .setNumOfOzoneManagers(1) + .setSCMConfigurator(configurator) .setNumDatanodes(NUM_DATANODES) - .setDnLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - this.cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) + .build()); + this.cluster = clusterBuilder.build(); scmClient = cluster.getStorageContainerLocationClient(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java new file mode 100644 index 000000000000..e7e0337b5f9f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils; + +import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; +import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Utility methods to manipulate/inspect container data on disk in a mini cluster. + */ +public final class ClusterContainersUtil { + private ClusterContainersUtil() { + } + + /** + * Return the location of the chunk files backing the key in a mini cluster. + * + * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. + * @return the directory containing the key's chunk files. + * @throws IOException + */ + public static File getChunksLocationPath(MiniOzoneCluster cluster, Container container, OzoneKey key) + throws IOException { + Preconditions.checkArgument(key instanceof OzoneKeyDetails); + long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) + .getContainerID(); + long localID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) + .getLocalID(); + // From the containerData, get the block iterator for all the blocks in + // the container. + KeyValueContainerData containerData = + (KeyValueContainerData) container.getContainerData(); + try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf()); + BlockIterator<BlockData> keyValueBlockIterator = + db.getStore().getBlockIterator(containerID)) { + // Find the block corresponding to the key we put. We use the localID of + // the BlockData to identify our key. + BlockData blockData = null; + while (keyValueBlockIterator.hasNext()) { + blockData = keyValueBlockIterator.nextBlock(); + if (blockData.getBlockID().getLocalID() == localID) { + break; + } + } + assertNotNull(blockData, "Block not found"); + + // Get the location of the chunk file + String containerBaseDir = + container.getContainerData().getVolume().getHddsRootDir().getPath(); + File chunksLocationPath = KeyValueContainerLocationUtil + .getChunksLocationPath(containerBaseDir, cluster.getClusterId(), containerID); + return chunksLocationPath; + } + } + + /** + * Corrupt the chunks backing the key in a mini cluster.
+ * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. + * @throws IOException + */ + public static void corruptData(MiniOzoneCluster cluster, Container container, OzoneKey key) + throws IOException { + File chunksLocationPath = getChunksLocationPath(cluster, container, key); + byte[] corruptData = "corrupted data".getBytes(UTF_8); + // Corrupt the contents of chunk files + for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { + FileUtils.writeByteArrayToFile(file, corruptData); + } + } + + /** + * Inspect and verify whether the chunks backing the key in a mini cluster match the given string. + * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. + * @return true if the on-disk data matches the given string; false otherwise. + * @throws IOException + */ + public static boolean verifyOnDiskData(MiniOzoneCluster cluster, Container container, OzoneKey key, String data) + throws IOException { + File chunksLocationPath = getChunksLocationPath(cluster, container, key); + for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { + String chunkOnDisk = FileUtils.readFileToString(file, Charset.defaultCharset()); + if (!data.equals(chunkOnDisk)) { + return false; + } + } + return true; + } + + /** + * Return the first container object in a mini cluster with the given container ID. + * @param cluster a mini ozone cluster object. + * @param containerID a long value representing the container ID. + * @return the container object; null if not found. + */ + public static Container getContainerByID(MiniOzoneCluster cluster, long containerID) { + // Get the container by traversing the datanodes. At least one of the + // datanodes must have this container.
+ Container container = null; + for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { + container = hddsDatanode.getDatanodeStateMachine().getContainer() + .getContainerSet().getContainer(containerID); + if (container != null) { + break; + } + } + return container; + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java index 4197ac8a8165..3239dfc1a47b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java @@ -50,10 +50,7 @@ static void setUp() throws IOException, InterruptedException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL"); - String omServiceId = "omServiceId1"; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(1) .build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index e864cae00b37..9c76c0ec0c79 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -19,10 +19,10 @@ import java.io.IOException; import java.util.List; -import java.util.Optional; import java.util.UUID; import java.util.concurrent.TimeoutException; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -38,6 +38,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; +import org.apache.ratis.util.function.CheckedFunction; /** * Interface used for MiniOzoneClusters. @@ -62,11 +63,7 @@ static Builder newBuilder(OzoneConfiguration conf) { * * @return MiniOzoneCluster builder */ - static Builder newOMHABuilder(OzoneConfiguration conf) { - return new MiniOzoneHAClusterImpl.Builder(conf); - } - - static Builder newHABuilder(OzoneConfiguration conf) { + static MiniOzoneHAClusterImpl.Builder newHABuilder(OzoneConfiguration conf) { return new MiniOzoneHAClusterImpl.Builder(conf); } @@ -77,11 +74,6 @@ static Builder newHABuilder(OzoneConfiguration conf) { */ OzoneConfiguration getConf(); - /** - * Set the configuration for the MiniOzoneCluster. - */ - void setConf(OzoneConfiguration newConf); - /** * Waits for the cluster to be ready, this call blocks till all the * configured {@link HddsDatanodeService} registers with @@ -93,7 +85,7 @@ static Builder newHABuilder(OzoneConfiguration conf) { void waitForClusterToBeReady() throws TimeoutException, InterruptedException; /** - * Waits for atleast one RATIS pipeline of given factor to be reported in open + * Waits for at least one RATIS pipeline of given factor to be reported in open * state. 
* * @param factor replication factor @@ -121,21 +113,6 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, */ void waitTobeOutOfSafeMode() throws TimeoutException, InterruptedException; - /** - * Returns OzoneManager Service ID. - * - * @return Service ID String - */ - String getOMServiceId(); - - - /** - * Returns StorageContainerManager Service ID. - * - * @return Service ID String - */ - String getSCMServiceId(); - /** * Returns {@link StorageContainerManager} associated with this * {@link MiniOzoneCluster} instance. @@ -180,20 +157,12 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, /** * Returns StorageContainerLocationClient to communicate with * {@link StorageContainerManager} associated with the MiniOzoneCluster. - * - * @return StorageContainerLocation Client - * @throws IOException */ StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient() throws IOException; /** * Restarts StorageContainerManager instance. - * - * @param waitForDatanode - * @throws IOException - * @throws TimeoutException - * @throws InterruptedException */ void restartStorageContainerManager(boolean waitForDatanode) throws InterruptedException, TimeoutException, IOException, @@ -201,8 +170,6 @@ void restartStorageContainerManager(boolean waitForDatanode) /** * Restarts OzoneManager instance. - * - * @throws IOException */ void restartOzoneManager() throws IOException; @@ -266,11 +233,6 @@ default void close() { */ void stop(); - /** - * Start Scm. - */ - void startScm() throws IOException; - /** * Start DataNodes. */ @@ -297,57 +259,43 @@ default String getBaseDir() { @SuppressWarnings("visibilitymodifier") abstract class Builder { - protected static final int DEFAULT_HB_INTERVAL_MS = 1000; - protected static final int DEFAULT_HB_PROCESSOR_INTERVAL_MS = 100; protected static final int ACTIVE_OMS_NOT_SET = -1; protected static final int ACTIVE_SCMS_NOT_SET = -1; - protected static final int DEFAULT_PIPELINE_LIMIT = 3; protected static final int DEFAULT_RATIS_RPC_TIMEOUT_SEC = 1; protected OzoneConfiguration conf; protected String path; protected String clusterId; - protected String omServiceId; - protected int numOfOMs; - protected int numOfActiveOMs = ACTIVE_OMS_NOT_SET; - - protected String scmServiceId; - protected int numOfSCMs; - protected int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET; protected SCMConfigurator scmConfigurator; - protected Optional hbInterval = Optional.empty(); - protected Optional hbProcessorInterval = Optional.empty(); protected String scmId = UUID.randomUUID().toString(); protected String omId = UUID.randomUUID().toString(); - - protected Optional datanodeReservedSpace = Optional.empty(); - protected boolean includeRecon = false; + protected boolean includeRecon = false; - protected Optional omLayoutVersion = Optional.empty(); - protected Optional scmLayoutVersion = Optional.empty(); - protected Optional dnLayoutVersion = Optional.empty(); + protected int dnInitialVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); + protected int dnCurrentVersion = DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue(); protected int numOfDatanodes = 3; - protected int numDataVolumes = 1; protected boolean startDataNodes = true; protected CertificateClient certClient; protected SecretKeyClient secretKeyClient; - protected int pipelineNumLimit = DEFAULT_PIPELINE_LIMIT; + protected DatanodeFactory dnFactory = UniformDatanodesFactory.newBuilder().build(); protected Builder(OzoneConfiguration conf) { this.conf = conf; - 
setClusterId(UUID.randomUUID().toString()); + setClusterId(); // Use default SCM configurations if no override is provided. setSCMConfigurator(new SCMConfigurator()); ExitUtils.disableSystemExit(); } - public Builder setConf(OzoneConfiguration config) { - this.conf = config; - return this; + /** Prepare the builder for another call to {@link #build()}, avoiding conflict + * between the clusters created. */ + protected void prepareForNextBuild() { + conf = new OzoneConfiguration(conf); + setClusterId(); } public Builder setSCMConfigurator(SCMConfigurator configurator) { @@ -355,13 +303,8 @@ public Builder setSCMConfigurator(SCMConfigurator configurator) { return this; } - /** - * Sets the cluster Id. - * - * @param id cluster Id - */ - void setClusterId(String id) { - clusterId = id; + private void setClusterId() { + clusterId = UUID.randomUUID().toString(); path = GenericTestUtils.getTempPath( MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId); } @@ -382,13 +325,6 @@ public Builder setStartDataNodes(boolean nodes) { return this; } - /** - * Sets the certificate client. - * - * @param client - * - * @return MiniOzoneCluster.Builder - */ public Builder setCertificateClient(CertificateClient client) { this.certClient = client; return this; @@ -413,83 +349,31 @@ public Builder setNumDatanodes(int val) { } /** - * Sets the number of data volumes per datanode. - * - * @param val number of volumes per datanode. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setNumDataVolumes(int val) { - numDataVolumes = val; - return this; - } - - /** - * Sets the total number of pipelines to create. - * @param val number of pipelines - * @return MiniOzoneCluster.Builder - */ - public Builder setTotalPipelineNumLimit(int val) { - pipelineNumLimit = val; - return this; - } - - /** - * Sets the number of HeartBeat Interval of Datanodes, the value should be - * in MilliSeconds. + * Set the initialVersion for all datanodes. * - * @param val HeartBeat interval in milliseconds + * @param val initialVersion value to be set for all datanodes. * * @return MiniOzoneCluster.Builder */ - public Builder setHbInterval(int val) { - hbInterval = Optional.of(val); + public Builder setDatanodeInitialVersion(int val) { + dnInitialVersion = val; return this; } /** - * Sets the number of HeartBeat Processor Interval of Datanodes, - * the value should be in MilliSeconds. + * Set the currentVersion for all datanodes. * - * @param val HeartBeat Processor interval in milliseconds + * @param val currentVersion value to be set for all datanodes. * * @return MiniOzoneCluster.Builder */ - public Builder setHbProcessorInterval(int val) { - hbProcessorInterval = Optional.of(val); + public Builder setDatanodeCurrentVersion(int val) { + dnCurrentVersion = val; return this; } - /** - * Sets the reserved space - * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys} - * HDDS_DATANODE_DIR_DU_RESERVED - * for each volume in each datanode. - * @param reservedSpace String that contains the numeric size value and - * ends with a - * {@link org.apache.hadoop.hdds.conf.StorageUnit} - * suffix. For example, "50GB". 
- * @see org.apache.hadoop.ozone.container.common.volume.VolumeInfo - * - * @return {@link MiniOzoneCluster} Builder - */ - public Builder setDatanodeReservedSpace(String reservedSpace) { - datanodeReservedSpace = Optional.of(reservedSpace); - return this; - } - - public Builder setNumOfOzoneManagers(int numOMs) { - this.numOfOMs = numOMs; - return this; - } - - public Builder setNumOfActiveOMs(int numActiveOMs) { - this.numOfActiveOMs = numActiveOMs; - return this; - } - - public Builder setOMServiceId(String serviceId) { - this.omServiceId = serviceId; + public Builder setDatanodeFactory(DatanodeFactory factory) { + this.dnFactory = factory; return this; } @@ -498,43 +382,18 @@ public Builder includeRecon(boolean include) { return this; } - public Builder setNumOfStorageContainerManagers(int numSCMs) { - this.numOfSCMs = numSCMs; - return this; - } - - public Builder setNumOfActiveSCMs(int numActiveSCMs) { - this.numOfActiveSCMs = numActiveSCMs; - return this; - } - - public Builder setSCMServiceId(String serviceId) { - this.scmServiceId = serviceId; - return this; - } - - public Builder setScmLayoutVersion(int layoutVersion) { - scmLayoutVersion = Optional.of(layoutVersion); - return this; - } - - public Builder setOmLayoutVersion(int layoutVersion) { - omLayoutVersion = Optional.of(layoutVersion); - return this; - } - - public Builder setDnLayoutVersion(int layoutVersion) { - dnLayoutVersion = Optional.of(layoutVersion); - return this; - } - /** * Constructs and returns MiniOzoneCluster. * * @return {@link MiniOzoneCluster} - * - * @throws IOException */ public abstract MiniOzoneCluster build() throws IOException; } + + /** + * Factory to customize configuration of each datanode. + */ + interface DatanodeFactory extends CheckedFunction { + // marker + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 400ae3ee2cc8..50013b57f4c3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -27,18 +27,16 @@ import java.util.Collections; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.Iterator; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.ConfigurationTarget; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -68,10 +66,8 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; -import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; import 
org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; @@ -81,19 +77,13 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.commons.io.FileUtils; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; + +import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD; -import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; @@ -102,6 +92,8 @@ import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,6 +108,8 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private static final Logger LOG = LoggerFactory.getLogger(MiniOzoneClusterImpl.class); + private static final String[] NO_ARGS = new String[0]; + static { CodecBuffer.enableLeakDetection(); } @@ -132,18 +126,17 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private CertificateClient caClient; private final Set clients = ConcurrentHashMap.newKeySet(); private SecretKeyClient secretKeyClient; + private static MockedStatic mockDNStatic = Mockito.mockStatic(HddsDatanodeService.class); /** * Creates a new MiniOzoneCluster with Recon. - * - * @throws IOException if there is an I/O error */ - MiniOzoneClusterImpl(OzoneConfiguration conf, - SCMConfigurator scmConfigurator, - OzoneManager ozoneManager, - StorageContainerManager scm, - List hddsDatanodes, - ReconServer reconServer) { + private MiniOzoneClusterImpl(OzoneConfiguration conf, + SCMConfigurator scmConfigurator, + OzoneManager ozoneManager, + StorageContainerManager scm, + List hddsDatanodes, + ReconServer reconServer) { this.conf = conf; this.ozoneManager = ozoneManager; this.scm = scm; @@ -157,9 +150,6 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { * StorageContainerManager. This is used by * {@link MiniOzoneHAClusterImpl} for starting multiple * OzoneManagers and StorageContainerManagers. 
- * - * @param conf - * @param hddsDatanodes */ MiniOzoneClusterImpl(OzoneConfiguration conf, SCMConfigurator scmConfigurator, List hddsDatanodes, ReconServer reconServer) { @@ -178,23 +168,10 @@ public OzoneConfiguration getConf() { return conf; } - @Override - public void setConf(OzoneConfiguration newConf) { + protected void setConf(OzoneConfiguration newConf) { this.conf = newConf; } - @Override - public String getOMServiceId() { - // Non-HA cluster doesn't have OM Service Id. - return null; - } - - @Override - public String getSCMServiceId() { - // Non-HA cluster doesn't have OM Service Id. - return null; - } - public void waitForSCMToBeReady() throws TimeoutException, InterruptedException { if (SCMHAUtils.isSCMHAEnabled(conf)) { @@ -207,9 +184,6 @@ public StorageContainerManager getActiveSCM() { return scm; } - /** - * Waits for the Ozone cluster to be ready for processing requests. - */ @Override public void waitForClusterToBeReady() throws TimeoutException, InterruptedException { @@ -233,10 +207,6 @@ public void waitForClusterToBeReady() }, 1000, waitForClusterToBeReadyTimeout); } - /** - * Waits for atleast one RATIS pipeline of given factor to be reported in open - * state. - */ @Override public void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, int timeoutInMs) throws @@ -249,24 +219,11 @@ public void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, }, 1000, timeoutInMs); } - /** - * Sets the timeout value after which - * {@link MiniOzoneClusterImpl#waitForClusterToBeReady} times out. - * - * @param timeoutInMs timeout value in milliseconds - */ @Override public void setWaitForClusterToBeReadyTimeout(int timeoutInMs) { waitForClusterToBeReadyTimeout = timeoutInMs; } - /** - * Waits for SCM to be out of Safe Mode. Many tests can be run iff we are out - * of Safe mode. - * - * @throws TimeoutException - * @throws InterruptedException - */ @Override public void waitTobeOutOfSafeMode() throws TimeoutException, InterruptedException { @@ -396,6 +353,16 @@ private void waitForHddsDatanodeToStop(DatanodeDetails dn) }, 1000, waitForClusterToBeReadyTimeout); } + private static void overrideDatanodeVersions(int dnInitialVersion, int dnCurrentVersion) { + // FUTURE_VERSION (-1) is not a valid version for a datanode, using it as a marker when version is not overridden + if (dnInitialVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) { + mockDNStatic.when(HddsDatanodeService::getDefaultInitialVersion).thenReturn(dnInitialVersion); + } + if (dnCurrentVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) { + mockDNStatic.when(HddsDatanodeService::getDefaultCurrentVersion).thenReturn(dnCurrentVersion); + } + } + @Override public void restartHddsDatanode(int i, boolean waitForDatanode) throws InterruptedException, TimeoutException { @@ -407,8 +374,7 @@ public void restartHddsDatanode(int i, boolean waitForDatanode) // wait for node to be removed from SCM healthy node list. waitForHddsDatanodeToStop(datanodeService.getDatanodeDetails()); } - String[] args = new String[] {}; - HddsDatanodeService service = new HddsDatanodeService(args); + HddsDatanodeService service = new HddsDatanodeService(NO_ARGS); service.setConfiguration(config); hddsDatanodes.add(i, service); startHddsDatanode(service); @@ -464,15 +430,7 @@ public void stop() { stopRecon(reconServer); } - /** - * Start Scm. 
- */ - @Override - public void startScm() throws IOException { - scm.start(); - } - - public void startHddsDatanode(HddsDatanodeService datanode) { + private void startHddsDatanode(HddsDatanodeService datanode) { try { datanode.setCertificateClient(getCAClient()); } catch (IOException e) { @@ -482,9 +440,6 @@ public void startHddsDatanode(HddsDatanodeService datanode) { datanode.start(); } - /** - * Start DataNodes. - */ @Override public void startHddsDatanodes() { hddsDatanodes.forEach(this::startHddsDatanode); @@ -504,7 +459,7 @@ public void shutdownHddsDatanodes() { @Override public void startRecon() { reconServer = new ReconServer(); - reconServer.execute(new String[]{}); + reconServer.execute(NO_ARGS); } @Override @@ -590,25 +545,10 @@ public MiniOzoneCluster build() throws IOException { ReconServer reconServer = null; List hddsDatanodes = Collections.emptyList(); try { - scm = createSCM(); - scm.start(); - om = createOM(); - if (certClient != null) { - om.setCertClient(certClient); - } - if (secretKeyClient != null) { - om.setSecretKeyClient(secretKeyClient); - } - om.start(); - - if (includeRecon) { - configureRecon(); - reconServer = new ReconServer(); - reconServer.execute(new String[] {}); - } - - hddsDatanodes = createHddsDatanodes( - Collections.singletonList(scm), reconServer); + scm = createAndStartSingleSCM(); + om = createAndStartSingleOM(); + reconServer = createRecon(); + hddsDatanodes = createHddsDatanodes(); MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, scmConfigurator, om, scm, @@ -619,6 +559,8 @@ public MiniOzoneCluster build() throws IOException { if (startDataNodes) { cluster.startHddsDatanodes(); } + + prepareForNextBuild(); return cluster; } catch (Exception ex) { stopOM(om); @@ -641,10 +583,17 @@ public MiniOzoneCluster build() throws IOException { } } + protected void setClients(OzoneManager om) throws IOException { + if (certClient != null) { + om.setCertClient(certClient); + } + if (secretKeyClient != null) { + om.setSecretKeyClient(secretKeyClient); + } + } + /** * Initializes the configuration required for starting MiniOzoneCluster. - * - * @throws IOException */ protected void initializeConfiguration() throws IOException { Path metaDir = Paths.get(path, "ozone-meta"); @@ -652,10 +601,6 @@ protected void initializeConfiguration() throws IOException { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - // MiniOzoneCluster should have global pipeline upper limit. - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, - pipelineNumLimit >= DEFAULT_PIPELINE_LIMIT ? - pipelineNumLimit : DEFAULT_PIPELINE_LIMIT); conf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, DEFAULT_RATIS_RPC_TIMEOUT_SEC, TimeUnit.SECONDS); SCMClientConfig scmClientConfig = conf.getObject(SCMClientConfig.class); @@ -672,23 +617,24 @@ void removeConfiguration() { FileUtils.deleteQuietly(new File(path)); } + protected StorageContainerManager createAndStartSingleSCM() + throws AuthenticationException, IOException { + StorageContainerManager scm = createSCM(); + scm.start(); + configureScmDatanodeAddress(singletonList(scm)); + return scm; + } + /** * Creates a new StorageContainerManager instance. * * @return {@link StorageContainerManager} - * @throws IOException */ protected StorageContainerManager createSCM() throws IOException, AuthenticationException { configureSCM(); - SCMStorageConfig scmStore; - - // Set non standard layout version if needed. 
- scmLayoutVersion.ifPresent(integer -> - conf.set(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - scmStore = new SCMStorageConfig(conf); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); initializeScmStorage(scmStore); StorageContainerManager scm = HddsTestUtils.getScmSimple(conf, scmConfigurator); @@ -701,6 +647,7 @@ protected StorageContainerManager createSCM() } return scm; } + protected void initializeScmStorage(SCMStorageConfig scmStore) throws IOException { if (scmStore.getState() == StorageState.INITIALIZED) { @@ -736,31 +683,35 @@ void initializeOmStorage(OMStorage omStorage) throws IOException { omStorage.initialize(); } + protected OzoneManager createAndStartSingleOM() throws AuthenticationException, IOException { + OzoneManager om = createOM(); + setClients(om); + om.start(); + return om; + } + /** * Creates a new OzoneManager instance. * * @return {@link OzoneManager} - * @throws IOException */ protected OzoneManager createOM() throws IOException, AuthenticationException { configureOM(); - omLayoutVersion.ifPresent(integer -> - conf.set(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); OMStorage omStore = new OMStorage(conf); initializeOmStorage(omStore); return OzoneManager.createOm(conf); } - protected String getSCMAddresses(List scms) { + private String getSCMAddresses(List scms) { StringBuilder stringBuilder = new StringBuilder(); Iterator iter = scms.iterator(); while (iter.hasNext()) { StorageContainerManager scm = iter.next(); - stringBuilder.append(scm.getDatanodeRpcAddress().getHostString() + - ":" + scm.getDatanodeRpcAddress().getPort()); + stringBuilder.append(scm.getDatanodeRpcAddress().getHostString()) + .append(":") + .append(scm.getDatanodeRpcAddress().getPort()); if (iter.hasNext()) { stringBuilder.append(","); } @@ -769,76 +720,48 @@ protected String getSCMAddresses(List scms) { return stringBuilder.toString(); } + protected void configureScmDatanodeAddress(List scms) { + conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, getSCMAddresses(scms)); + } + + protected ReconServer createRecon() { + ReconServer reconServer = null; + if (includeRecon) { + configureRecon(); + reconServer = new ReconServer(); + reconServer.execute(NO_ARGS); + + OzoneStorageContainerManager reconScm = + reconServer.getReconStorageContainerManager(); + conf.set(OZONE_RECON_ADDRESS_KEY, + reconScm.getDatanodeRpcAddress().getHostString() + ":" + + reconScm.getDatanodeRpcAddress().getPort()); + } + return reconServer; + } + /** * Creates HddsDatanodeService(s) instance. 
* * @return List of HddsDatanodeService - * @throws IOException */ - protected List createHddsDatanodes( - List scms, ReconServer reconServer) + protected List createHddsDatanodes() throws IOException { - String scmAddress = getSCMAddresses(scms); - String[] args = new String[] {}; - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress); List hddsDatanodes = new ArrayList<>(); + + // Override default datanode initial and current version if necessary + overrideDatanodeVersions(dnInitialVersion, dnCurrentVersion); + for (int i = 0; i < numOfDatanodes; i++) { - OzoneConfiguration dnConf = new OzoneConfiguration(conf); - configureDatanodePorts(dnConf); - String datanodeBaseDir = path + "/datanode-" + Integer.toString(i); - Path metaDir = Paths.get(datanodeBaseDir, "meta"); - List dataDirs = new ArrayList<>(); - List reservedSpaceList = new ArrayList<>(); - for (int j = 0; j < numDataVolumes; j++) { - Path dir = Paths.get(datanodeBaseDir, "data-" + j, "containers"); - Files.createDirectories(dir); - dataDirs.add(dir.toString()); - datanodeReservedSpace.ifPresent( - s -> reservedSpaceList.add(dir + ":" + s)); - } - String reservedSpaceString = String.join(",", reservedSpaceList); - String listOfDirs = String.join(",", dataDirs); - Path ratisDir = Paths.get(datanodeBaseDir, "data", "ratis"); - Path workDir = Paths.get(datanodeBaseDir, "data", "replication", - "work"); - Files.createDirectories(metaDir); - Files.createDirectories(ratisDir); - Files.createDirectories(workDir); - dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); - dnConf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, listOfDirs); - dnConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, listOfDirs); - dnConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, - reservedSpaceString); - dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, - ratisDir.toString()); - if (reconServer != null) { - OzoneStorageContainerManager reconScm = - reconServer.getReconStorageContainerManager(); - dnConf.set(OZONE_RECON_ADDRESS_KEY, - reconScm.getDatanodeRpcAddress().getHostString() + ":" + - reconScm.getDatanodeRpcAddress().getPort()); - } + OzoneConfiguration dnConf = dnFactory.apply(conf); - HddsDatanodeService datanode = new HddsDatanodeService(args); + HddsDatanodeService datanode = new HddsDatanodeService(NO_ARGS); datanode.setConfiguration(dnConf); hddsDatanodes.add(datanode); } - if (dnLayoutVersion.isPresent()) { - configureLayoutVersionInDatanodes(hddsDatanodes, dnLayoutVersion.get()); - } return hddsDatanodes; } - private void configureLayoutVersionInDatanodes( - List dns, int layoutVersion) throws IOException { - for (HddsDatanodeService dn : dns) { - DatanodeLayoutStorage layoutStorage; - layoutStorage = new DatanodeLayoutStorage(dn.getConf(), - UUID.randomUUID().toString(), layoutVersion); - layoutStorage.initialize(); - } - } - protected void configureSCM() { conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, localhostWithFreePort()); @@ -850,30 +773,6 @@ protected void configureSCM() { localhostWithFreePort()); conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "3s"); - configureSCMheartbeat(); - } - - private void configureSCMheartbeat() { - if (hbInterval.isPresent()) { - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - hbInterval.get(), TimeUnit.MILLISECONDS); - } else { - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - DEFAULT_HB_INTERVAL_MS, - TimeUnit.MILLISECONDS); - } - - if (hbProcessorInterval.isPresent()) { - conf.setTimeDuration( - 
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - hbProcessorInterval.get(), - TimeUnit.MILLISECONDS); - } else { - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - DEFAULT_HB_PROCESSOR_INTERVAL_MS, - TimeUnit.MILLISECONDS); - } } private void configureOM() { @@ -884,22 +783,7 @@ private void configureOM() { conf.setInt(OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, getFreePort()); } - protected void configureDatanodePorts(ConfigurationTarget conf) { - conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, - anyHostWithFreePort()); - conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, - anyHostWithFreePort()); - conf.set(HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY, - anyHostWithFreePort()); - conf.setInt(DFS_CONTAINER_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); - conf.setFromObject(new ReplicationConfig().setPort(getFreePort())); - } - - protected void configureRecon() throws IOException { + protected void configureRecon() { ConfigurationProvider.resetConfiguration(); File tempNewFolder = new File(path, "recon"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java index cdd12ac841e3..618e2dd42107 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java @@ -17,14 +17,12 @@ */ package org.apache.hadoop.ozone; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.HashSet; import java.util.Set; -import java.util.UUID; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; @@ -49,10 +47,10 @@ * however shutting down the cluster in the background while the new cluster is * getting created will likely save about 10 seconds per test. * - * To use this class, setup the Cluster Provider in a static method annotated - * with @BeforeClass, eg: - * - * @BeforeClass + * To use this class, set up the Cluster Provider in a static method annotated + * with {@code @BeforeAll}, eg: + *

    + *   @BeforeAll
      *   public static void init() {
      *     OzoneConfiguration conf = new OzoneConfiguration();
      *     final int interval = 100;
    @@ -71,29 +69,34 @@
      *
    - *     clusterProvider = new MiniOzoneClusterProvider(conf, builder, 5);
    + *     clusterProvider = new MiniOzoneClusterProvider(builder, 5);
      *   }
    + * 
    * - * Ensure you shutdown the provider in a @AfterClass annotated method: + * Ensure you shut down the provider in an {@code @AfterAll} annotated method: * - * @AfterClass + *
    + *   @AfterAll
      *   public static void shutdown() throws InterruptedException {
      *     if (clusterProvider != null) {
      *       clusterProvider.shutdown();
      *     }
      *   }
    + * 
    * - * Then in the @Before method, or in the test itself, obtain a cluster: + * Then in the {@code @BeforeEach} method, or in the test itself, obtain a cluster: * - * @Before + *
    + *   @BeforeEach
      *   public void setUp() throws Exception {
      *     cluster = clusterProvider.provide();
      *   }
      *
    - *   @After
    + *   @AfterEach
      *   public void tearDown() throws InterruptedException, IOException {
      *     if (cluster != null) {
      *       clusterProvider.destroy(cluster);
      *     }
      *   }
    + * 
    * * This only works if the same config / builder object can be passed to each * cluster in the test suite. @@ -118,7 +121,6 @@ public class MiniOzoneClusterProvider { private final int clusterLimit; private int consumedClusterCount = 0; - private final OzoneConfiguration conf; private final MiniOzoneCluster.Builder builder; private final Thread createThread; private final Thread reapThread; @@ -130,16 +132,13 @@ public class MiniOzoneClusterProvider { = new ArrayBlockingQueue<>(EXPIRED_LIMIT); /** - * - * @param conf The configuration to use when creating the cluster * @param builder A builder object with all cluster options set * @param clusterLimit The total number of clusters this provider should * create. If another is requested after this limit has * been reached, an exception will be thrown. */ - public MiniOzoneClusterProvider(OzoneConfiguration conf, + public MiniOzoneClusterProvider( MiniOzoneCluster.Builder builder, int clusterLimit) { - this.conf = conf; this.builder = builder; this.clusterLimit = clusterLimit; createThread = createClusters(); @@ -214,9 +213,6 @@ private Thread createClusters() { while (!Thread.interrupted() && createdCount < clusterLimit) { MiniOzoneCluster cluster = null; try { - builder.setClusterId(UUID.randomUUID().toString()); - builder.setConf(new OzoneConfiguration(conf)); - cluster = builder.build(); cluster.waitForClusterToBeReady(); createdCount++; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 797a7515f206..16ef88177f3b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -56,8 +56,6 @@ import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; @@ -82,11 +80,6 @@ public class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl { private static final int RATIS_RPC_TIMEOUT = 1000; // 1 second public static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds - /** - * Creates a new MiniOzoneCluster. - * - * @throws IOException if there is an I/O error - */ public MiniOzoneHAClusterImpl( OzoneConfiguration conf, SCMConfigurator scmConfigurator, @@ -101,19 +94,8 @@ public MiniOzoneHAClusterImpl( this.clusterMetaPath = clusterPath; } - @Override - public String getOMServiceId() { - return omhaService.getServiceId(); - } - - @Override - public String getSCMServiceId() { - return scmhaService.getServiceId(); - } - /** * Returns the first OzoneManager from the list. 
- * @return */ @Override public OzoneManager getOzoneManager() { @@ -355,12 +337,20 @@ private static void configureOMPorts(ConfigurationTarget conf, public static class Builder extends MiniOzoneClusterImpl.Builder { private static final String OM_NODE_ID_PREFIX = "omNode-"; - private List activeOMs = new ArrayList<>(); - private List inactiveOMs = new ArrayList<>(); + private final List activeOMs = new ArrayList<>(); + private final List inactiveOMs = new ArrayList<>(); private static final String SCM_NODE_ID_PREFIX = "scmNode-"; - private List activeSCMs = new ArrayList<>(); - private List inactiveSCMs = new ArrayList<>(); + private final List activeSCMs = new ArrayList<>(); + private final List inactiveSCMs = new ArrayList<>(); + + private String omServiceId; + private int numOfOMs; + private int numOfActiveOMs = ACTIVE_OMS_NOT_SET; + + private String scmServiceId; + private int numOfSCMs; + private int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET; /** * Creates a new Builder. @@ -371,8 +361,38 @@ public Builder(OzoneConfiguration conf) { super(conf); } + public Builder setNumOfOzoneManagers(int numOMs) { + this.numOfOMs = numOMs; + return this; + } + + public Builder setNumOfActiveOMs(int numActiveOMs) { + this.numOfActiveOMs = numActiveOMs; + return this; + } + + public Builder setOMServiceId(String serviceId) { + this.omServiceId = serviceId; + return this; + } + + public Builder setNumOfStorageContainerManagers(int numSCMs) { + this.numOfSCMs = numSCMs; + return this; + } + + public Builder setNumOfActiveSCMs(int numActiveSCMs) { + this.numOfActiveSCMs = numActiveSCMs; + return this; + } + + public Builder setSCMServiceId(String serviceId) { + this.scmServiceId = serviceId; + return this; + } + @Override - public MiniOzoneCluster build() throws IOException { + public MiniOzoneHAClusterImpl build() throws IOException { if (numOfActiveOMs > numOfOMs) { throw new IllegalArgumentException("Number of active OMs cannot be " + "more than the total number of OMs"); @@ -399,21 +419,16 @@ public MiniOzoneCluster build() throws IOException { initOMRatisConf(); SCMHAService scmService; OMHAService omService; - ReconServer reconServer = null; + ReconServer reconServer; try { scmService = createSCMService(); omService = createOMService(); - if (includeRecon) { - configureRecon(); - reconServer = new ReconServer(); - reconServer.execute(new String[] {}); - } + reconServer = createRecon(); } catch (AuthenticationException ex) { throw new IOException("Unable to build MiniOzoneCluster. 
", ex); } - final List hddsDatanodes = createHddsDatanodes( - scmService.getActiveServices(), reconServer); + final List hddsDatanodes = createHddsDatanodes(); MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(conf, scmConfigurator, omService, scmService, hddsDatanodes, path, @@ -422,9 +437,14 @@ public MiniOzoneCluster build() throws IOException { if (startDataNodes) { cluster.startHddsDatanodes(); } + prepareForNextBuild(); return cluster; } + protected int numberOfOzoneManagers() { + return numOfOMs; + } + protected void initOMRatisConf() { conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); @@ -460,8 +480,7 @@ protected void initOMRatisConf() { protected OMHAService createOMService() throws IOException, AuthenticationException { if (omServiceId == null) { - OzoneManager om = createOM(); - om.start(); + OzoneManager om = createAndStartSingleOM(); return new OMHAService(singletonList(om), null, null); } @@ -487,16 +506,9 @@ protected OMHAService createOMService() throws IOException, String metaDirPath = path + "/" + nodeId; config.set(OZONE_METADATA_DIRS, metaDirPath); - // Set non standard layout version if needed. - omLayoutVersion.ifPresent(integer -> - config.set(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - OzoneManager.omInit(config); OzoneManager om = OzoneManager.createOm(config); - if (certClient != null) { - om.setCertClient(certClient); - } + setClients(om); omList.add(om); if (i <= numOfActiveOMs) { @@ -533,8 +545,7 @@ protected OMHAService createOMService() throws IOException, protected SCMHAService createSCMService() throws IOException, AuthenticationException { if (scmServiceId == null) { - StorageContainerManager scm = createSCM(); - scm.start(); + StorageContainerManager scm = createAndStartSingleSCM(); return new SCMHAService(singletonList(scm), null, null); } @@ -555,10 +566,6 @@ protected SCMHAService createSCMService() scmConfig.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, nodeId); scmConfig.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - scmLayoutVersion.ifPresent(integer -> - scmConfig.set(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - configureSCM(); if (i == 1) { StorageContainerManager.scmInit(scmConfig, clusterId); @@ -603,6 +610,8 @@ protected SCMHAService createSCMService() } } + configureScmDatanodeAddress(activeSCMs); + return new SCMHAService(activeSCMs, inactiveSCMs, scmServiceId); } @@ -731,7 +740,7 @@ public void bootstrapOzoneManager(String omNodeId, while (true) { try { - OzoneConfiguration newConf = addNewOMToConfig(getOMServiceId(), + OzoneConfiguration newConf = addNewOMToConfig(omhaService.getServiceId(), omNodeId); if (updateConfigs) { @@ -788,7 +797,7 @@ private OzoneConfiguration addNewOMToConfig(String omServiceId, /** * Update the configurations of the given list of OMs. 
*/ - public void updateOMConfigs(OzoneConfiguration newConf) { + private void updateOMConfigs(OzoneConfiguration newConf) { for (OzoneManager om : omhaService.getActiveServices()) { om.setConfiguration(newConf); } @@ -881,17 +890,17 @@ public void setupExitManagerForTesting() { * @param */ static class MiniOzoneHAService { - private Map serviceMap; - private List services; - private String serviceId; - private String serviceName; + private final Map serviceMap; + private final List services; + private final String serviceId; + private final String serviceName; // Active services s denote OM/SCM services which are up and running - private List activeServices; - private List inactiveServices; + private final List activeServices; + private final List inactiveServices; // Function to extract the Id from service - private Function serviceIdProvider; + private final Function serviceIdProvider; MiniOzoneHAService(String name, List activeList, List inactiveList, String serviceId, @@ -1017,8 +1026,8 @@ public StorageContainerManager getStorageContainerManager() { private static final class ExitManagerForOM extends ExitManager { - private MiniOzoneHAClusterImpl cluster; - private String omNodeId; + private final MiniOzoneHAClusterImpl cluster; + private final String omNodeId; private ExitManagerForOM(MiniOzoneHAClusterImpl cluster, String nodeId) { this.cluster = cluster; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index 5338cb8a0cc1..c084a72a3c79 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -44,11 +44,11 @@ public interface RatisTestHelper { Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class); static void initRatisConf(RpcType rpc, OzoneConfiguration conf) { - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - LOG.info("{} = {}", OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, + LOG.info("{} = {}", OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index a04c1236186c..26c1868084fc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -295,9 +296,11 @@ private void readData(OmKeyInfo keyInfo, Function retryFunc) throws IOException { 
XceiverClientFactory xceiverClientManager = ((RpcClient) client.getProxy()).getXceiverClientManager(); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); try (InputStream is = KeyInputStream.getFromOmKeyInfo(keyInfo, - xceiverClientManager, - false, retryFunc, blockInputStreamFactory)) { + xceiverClientManager, retryFunc, blockInputStreamFactory, + clientConfig)) { byte[] buf = new byte[100]; int readBytes = is.read(buf, 0, 100); assertEquals(100, readBytes); @@ -378,13 +381,12 @@ private static void setSecureConfig() throws IOException { private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKey") - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java index a181a6f45e95..87242cb2790e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -318,14 +318,13 @@ private String[] createArgsForCommand(String[] additionalArgs) { private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java index 79ea4c593c40..27abd9e05798 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java @@ -22,25 +22,27 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancerConfiguration; import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import java.util.Optional; -import java.util.concurrent.TimeUnit; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; import static org.junit.jupiter.api.Assertions.assertFalse; import static 
org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * This class tests container balancer operations * from cblock clients. */ -@Timeout(value = 300, unit = TimeUnit.MILLISECONDS) +@Timeout(value = 300) public class TestContainerBalancerOperations { private static ScmClient containerBalancerClient; @@ -52,6 +54,8 @@ public static void setup() throws Exception { ozoneConf = new OzoneConfiguration(); ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); + ozoneConf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 5, SECONDS); + ozoneConf.setBoolean("hdds.container.balancer.trigger.du.before.move.enable", true); cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build(); containerBalancerClient = new ContainerOperationClient(ozoneConf); cluster.waitForClusterToBeReady(); @@ -69,9 +73,6 @@ public static void cleanup() throws Exception { * @throws Exception */ @Test - @Unhealthy("Since the cluster doesn't have " + - "unbalanced nodes, ContainerBalancer stops before the assertion checks " + - "whether balancer is running.") public void testContainerBalancerCLIOperations() throws Exception { // test normally start and stop boolean running = containerBalancerClient.getContainerBalancerStatus(); @@ -81,13 +82,20 @@ public void testContainerBalancerCLIOperations() throws Exception { Optional maxDatanodesPercentageToInvolvePerIteration = Optional.of(100); Optional maxSizeToMovePerIterationInGB = Optional.of(1L); - Optional maxSizeEnteringTargetInGB = Optional.of(1L); - Optional maxSizeLeavingSourceInGB = Optional.of(1L); - + Optional maxSizeEnteringTargetInGB = Optional.of(6L); + Optional maxSizeLeavingSourceInGB = Optional.of(6L); + Optional balancingInterval = Optional.of(70); + Optional moveTimeout = Optional.of(65); + Optional moveReplicationTimeout = Optional.of(55); + Optional networkTopologyEnable = Optional.of(false); + Optional includeNodes = Optional.of(""); + Optional excludeNodes = Optional.of(""); containerBalancerClient.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); running = containerBalancerClient.getContainerBalancerStatus(); assertTrue(running); @@ -95,7 +103,7 @@ public void testContainerBalancerCLIOperations() throws Exception { // TODO: this is a temporary implementation for now // modify this after balancer is fully completed try { - Thread.sleep(100); + Thread.sleep(20000); } catch (InterruptedException e) { } running = containerBalancerClient.getContainerBalancerStatus(); @@ -105,7 +113,9 @@ public void testContainerBalancerCLIOperations() throws Exception { containerBalancerClient.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); running = containerBalancerClient.getContainerBalancerStatus(); assertTrue(running); @@ -115,4 +125,61 @@ public void testContainerBalancerCLIOperations() throws Exception { } //TODO: add more acceptance after container balancer is fully completed + + /** + * Test if 
Container Balancer CLI overrides default configs and + * options specified in the configs. + */ + @Test + public void testIfCBCLIOverridesConfigs() throws Exception { + //Configurations added in ozone-site.xml + ozoneConf.setInt("hdds.container.balancer.iterations", 40); + ozoneConf.setInt("hdds.container.balancer.datanodes.involved.max.percentage.per.iteration", 30); + + boolean running = containerBalancerClient.getContainerBalancerStatus(); + assertFalse(running); + + //CLI option for iterations and balancing interval is not passed + Optional iterations = Optional.empty(); + Optional balancingInterval = Optional.empty(); + + //CLI options are passed + Optional threshold = Optional.of(0.1); + Optional maxDatanodesPercentageToInvolvePerIteration = + Optional.of(100); + Optional maxSizeToMovePerIterationInGB = Optional.of(1L); + Optional maxSizeEnteringTargetInGB = Optional.of(6L); + Optional maxSizeLeavingSourceInGB = Optional.of(6L); + Optional moveTimeout = Optional.of(65); + Optional moveReplicationTimeout = Optional.of(55); + Optional networkTopologyEnable = Optional.of(true); + Optional includeNodes = Optional.of(""); + Optional excludeNodes = Optional.of(""); + containerBalancerClient.startContainerBalancer(threshold, iterations, + maxDatanodesPercentageToInvolvePerIteration, + maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); + running = containerBalancerClient.getContainerBalancerStatus(); + assertTrue(running); + + ContainerBalancerConfiguration config = cluster.getStorageContainerManager().getContainerBalancer().getConfig(); + + //If config value is not added in ozone-site.xml and CLI option is not passed + //then it takes the default configuration + assertEquals(70, config.getBalancingInterval().toMinutes()); + + //If config value is added in ozone-site.xml and CLI option is not passed + //then it takes the value from ozone-site.xml + assertEquals(40, config.getIterations()); + + //If config value is added in ozone-site.xml and CLI option is passed + //then it takes the CLI option. 
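The assertions in this test encode a simple precedence: a CLI option that was passed wins, otherwise a value set in ozone-site.xml applies, otherwise the built-in default is used. As a hedged sketch of that resolution (a hypothetical helper with an illustrative default, not the balancer's actual implementation; generic types restored where this rendering drops them):

    // Hypothetical helper, not Ozone API: resolve one balancer setting.
    // Requires java.util.Optional and org.apache.hadoop.hdds.conf.OzoneConfiguration.
    static int resolveIterations(Optional<Integer> cliValue, OzoneConfiguration conf) {
      int illustrativeDefault = 10;  // placeholder for the built-in default
      // CLI value first; otherwise the ozone-site.xml value, falling back to the default.
      return cliValue.orElseGet(
          () -> conf.getInt("hdds.container.balancer.iterations", illustrativeDefault));
    }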
+ assertEquals(100, config.getMaxDatanodesPercentageToInvolvePerIteration()); + + containerBalancerClient.stopContainerBalancer(); + running = containerBalancerClient.getContainerBalancerStatus(); + assertFalse(running); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index bb04c73ffe27..4488e467c29a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -21,13 +21,19 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.Scanner; +import com.google.common.collect.Maps; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -36,8 +42,12 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; @@ -180,4 +190,65 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client, "Could not create unique volume/bucket " + "in " + attempts + " attempts"); } + + public static Map createKeys(MiniOzoneCluster cluster, int numOfKeys) + throws Exception { + Map keyLocationMap = Maps.newHashMap(); + + try (OzoneClient client = cluster.newClient()) { + OzoneBucket bucket = createVolumeAndBucket(client); + for (int i = 0; i < numOfKeys; i++) { + String keyName = RandomStringUtils.randomAlphabetic(5) + i; + createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5)); + keyLocationMap.put(keyName, lookupOmKeyInfo(cluster, bucket, keyName)); + } + } + return keyLocationMap; + } + + public static void cleanupDeletedTable(OzoneManager ozoneManager) throws IOException { + Table deletedTable = ozoneManager.getMetadataManager().getDeletedTable(); + List nameList = new ArrayList<>(); + try (TableIterator> keyIter = deletedTable.iterator()) { + while (keyIter.hasNext()) { + Table.KeyValue kv = keyIter.next(); + nameList.add(kv.getKey()); + } + } + nameList.forEach(k -> { + try { + deletedTable.delete(k); + } catch (IOException e) { + // do nothing + } + }); + } + + public static void cleanupOpenKeyTable(OzoneManager ozoneManager, BucketLayout bucketLayout) throws IOException { + Table openKeyTable = ozoneManager.getMetadataManager().getOpenKeyTable(bucketLayout); + List nameList = new ArrayList<>(); + try (TableIterator> keyIter = openKeyTable.iterator()) { + while (keyIter.hasNext()) { + Table.KeyValue kv = keyIter.next(); + nameList.add(kv.getKey()); + 
} + } + nameList.forEach(k -> { + try { + openKeyTable.delete(k); + } catch (IOException e) { + // do nothing + } + }); + } + + private static OmKeyInfo lookupOmKeyInfo(MiniOzoneCluster cluster, + OzoneBucket bucket, String key) throws IOException { + OmKeyArgs arg = new OmKeyArgs.Builder() + .setVolumeName(bucket.getVolumeName()) + .setBucketName(bucket.getName()) + .setKeyName(key) + .build(); + return cluster.getOzoneManager().lookupKey(arg); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index a82a1a8be70a..77970ad4470b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; @@ -46,6 +47,7 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; @@ -312,6 +314,8 @@ public void testDelegationToken(boolean useIp) throws Exception { try { // Start OM om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient( + new ScmBlockLocationTestingClient(null, null, 0))); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String username = ugi.getUserName(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java new file mode 100644 index 000000000000..9becc8b2591c --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
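The new TestDataUtil helpers above create a batch of small keys and return their OmKeyInfo by key name, while the cleanup helpers drain the OM's deletedTable and openKeyTable between tests. A usage sketch (not part of this patch), assuming a running MiniOzoneCluster named cluster and with generic types inferred from the helper bodies, since this rendering drops them:

    // Write five random keys and capture their OmKeyInfo for later assertions.
    Map<String, OmKeyInfo> keys = TestDataUtil.createKeys(cluster, 5);
    // ... exercise the code under test ...
    // Reset OM table state so the next test starts from a clean slate.
    TestDataUtil.cleanupDeletedTable(cluster.getOzoneManager());
    TestDataUtil.cleanupOpenKeyTable(cluster.getOzoneManager(), BucketLayout.FILE_SYSTEM_OPTIMIZED);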
+ */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; + +import org.junit.jupiter.api.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +import java.io.IOException; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * + * This class is to test the serialization/deserialization of cluster tree + * information from SCM. + */ +@Timeout(300) +public class TestGetClusterTreeInformation { + + public static final Logger LOG = + LoggerFactory.getLogger(TestGetClusterTreeInformation.class); + private static int numOfDatanodes = 3; + private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf; + private static StorageContainerManager scm; + + @BeforeAll + public static void init() throws IOException, TimeoutException, + InterruptedException { + conf = new OzoneConfiguration(); + cluster = MiniOzoneCluster.newHABuilder(conf) + .setNumOfOzoneManagers(3) + .setNumOfStorageContainerManagers(3) + .setNumDatanodes(numOfDatanodes) + .build(); + cluster.waitForClusterToBeReady(); + scm = cluster.getStorageContainerManager(); + } + + @AfterAll + public static void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testGetClusterTreeInformation() throws IOException { + SCMBlockLocationFailoverProxyProvider failoverProxyProvider = + new SCMBlockLocationFailoverProxyProvider(conf); + failoverProxyProvider.changeCurrentProxy(scm.getSCMNodeId()); + ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient = + new ScmBlockLocationProtocolClientSideTranslatorPB( + failoverProxyProvider); + + InnerNode expectedInnerNode = (InnerNode) scm.getClusterMap().getNode(ROOT); + InnerNode actualInnerNode = scmBlockLocationClient.getNetworkTopology(); + assertEquals(expectedInnerNode, actualInnerNode); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 74d52c4a9457..275061ef7843 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -48,7 +48,7 @@ import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -68,7 +68,7 @@ static void setup(@TempDir File testDir) { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); 
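The HA cluster setup used by the new TestGetClusterTreeInformation above, and by the updated startCluster methods elsewhere in this patch, no longer needs a downcast because the HA builder's build() now returns MiniOzoneHAClusterImpl. In isolation the pattern looks like this (a sketch with illustrative service ids, not part of this patch):

    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf)
        .setSCMServiceId("scm-service-test")   // illustrative id
        .setOMServiceId("om-service-test")     // illustrative id
        .setNumOfStorageContainerManagers(3)
        .setNumOfOzoneManagers(3);
    MiniOzoneHAClusterImpl cluster = builder.build();  // no cast needed any more
    cluster.waitForClusterToBeReady();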
conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s"); } @@ -114,13 +114,13 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { // Each instance of SM will create an ozone container // that bounds to a random port. - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); List stateMachines = new ArrayList<>(); try { @@ -168,7 +168,7 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { } // Turn off the random port flag and test again - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); try ( DatanodeStateMachine sm1 = new DatanodeStateMachine( randomDatanodeDetails(), ozoneConf); @@ -182,8 +182,8 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { assertFalse(ports.add(sm2.getContainer().getReadChannel().getIPCPort())); assertFalse(ports.add(sm3.getContainer().getReadChannel().getIPCPort())); assertEquals(ports.iterator().next().intValue(), - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT)); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT)); } } @@ -258,8 +258,10 @@ public void testMultipleDataDirs() throws Exception { String reservedSpace = "1B"; cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) - .setNumDataVolumes(3) - .setDatanodeReservedSpace(reservedSpace) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(3) + .setReservedSpace(reservedSpace) + .build()) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java deleted file mode 100644 index 0c51ba41311c..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.ozone.test.GenericTestUtils; - -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -/** - * This class tests MiniOzoneHAClusterImpl. - */ -@Timeout(value = 300, unit = TimeUnit.SECONDS) -public class TestMiniOzoneOMHACluster { - - private MiniOzoneHAClusterImpl cluster = null; - private OzoneConfiguration conf; - private String omServiceId; - private int numOfOMs = 3; - - /** - * Create a MiniOzoneHAClusterImpl for testing. - * - * @throws Exception - */ - @BeforeEach - public void init() throws Exception { - conf = new OzoneConfiguration(); - omServiceId = "omServiceId1"; - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, - OZONE_ADMINISTRATORS_WILDCARD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - cluster.waitForClusterToBeReady(); - } - - /** - * Shutdown MiniOzoneHAClusterImpl. 
- */ - @AfterEach - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testGetOMLeader() throws InterruptedException, TimeoutException { - AtomicReference ozoneManager = new AtomicReference<>(); - // Wait for OM leader election to finish - GenericTestUtils.waitFor(() -> { - OzoneManager om = cluster.getOMLeader(); - ozoneManager.set(om); - return om != null; - }, 100, 120000); - assertNotNull(ozoneManager, "Timed out waiting OM leader election to finish: " - + "no leader or more than one leader."); - assertTrue(ozoneManager.get().isLeaderReady(), "Should have gotten the leader!"); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 852f351ee25a..daeb3a7b2d74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -103,13 +103,12 @@ public static void init() throws Exception { private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } @@ -141,11 +140,11 @@ private CompleteMultipartUploadRequest.Part uploadPart(String uploadID, Response response = REST.put(BUCKET, KEY, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); CompleteMultipartUploadRequest.Part part = new CompleteMultipartUploadRequest.Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java new file mode 100644 index 000000000000..cef872597e43 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone; + +import com.google.common.collect.ImmutableMap; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.server.SCMConfigurator; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.net.StaticMapping; + +import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.OmTestManagers; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.io.File; +import java.util.List; +import java.util.Map; +import java.util.ArrayList; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_LEVEL; +import static org.mockito.Mockito.mock; + +/** + * {@link org.apache.hadoop.hdds.scm.server.TestSCMBlockProtocolServer} + * sortDatanodes tests for + * {@link org.apache.hadoop.ozone.om.KeyManagerImpl#sortDatanodes(List, String)}. 
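The assertions in the test below reduce to one ordering contract. A sketch of what they check (not part of this patch), assuming a NodeManager populated as in the setup that follows, with generic types as a best reading since this rendering drops them:

    // Sort the registered datanodes relative to the first node's IP address.
    DatanodeDetails client = nodeManager.getAllNodes().get(0);
    List<DatanodeDetails> sorted =
        keyManager.sortDatanodes(nodeManager.getAllNodes(), client.getIpAddress());
    // Expected order: the client's own datanode first, then the remaining
    // nodes of its rack, and only then the nodes from other racks.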
+ */ +@Timeout(300) +public class TestOMSortDatanodes { + + private static OzoneConfiguration config; + private static StorageContainerManager scm; + private static NodeManager nodeManager; + private static KeyManagerImpl keyManager; + private static StorageContainerLocationProtocol mockScmContainerClient; + private static OzoneManager om; + private static File dir; + private static final int NODE_COUNT = 10; + private static final Map EDGE_NODES = ImmutableMap.of( + "edge0", "/rack0", + "edge1", "/rack1" + ); + + @BeforeAll + public static void setup() throws Exception { + config = new OzoneConfiguration(); + dir = GenericTestUtils.getRandomizedTestDir(); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); + config.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + StaticMapping.class.getName()); + config.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); + List datanodes = new ArrayList<>(NODE_COUNT); + List nodeMapping = new ArrayList<>(NODE_COUNT); + for (int i = 0; i < NODE_COUNT; i++) { + DatanodeDetails dn = randomDatanodeDetails(); + final String rack = "/rack" + (i % 2); + nodeMapping.add(dn.getHostName() + "=" + rack); + nodeMapping.add(dn.getIpAddress() + "=" + rack); + datanodes.add(dn); + } + EDGE_NODES.forEach((n, r) -> nodeMapping.add(n + "=" + r)); + config.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, + String.join(",", nodeMapping)); + + SCMConfigurator configurator = new SCMConfigurator(); + configurator.setSCMHAManager(SCMHAManagerStub.getInstance(true)); + configurator.setScmContext(SCMContext.emptyContext()); + scm = HddsTestUtils.getScm(config, configurator); + scm.start(); + scm.exitSafeMode(); + nodeManager = scm.getScmNodeManager(); + datanodes.forEach(dn -> nodeManager.register(dn, null, null)); + mockScmContainerClient = + mock(StorageContainerLocationProtocol.class); + OmTestManagers omTestManagers + = new OmTestManagers(config, scm.getBlockProtocolServer(), + mockScmContainerClient); + om = omTestManagers.getOzoneManager(); + keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); + } + + @AfterAll + public static void cleanup() throws Exception { + if (scm != null) { + scm.stop(); + scm.join(); + } + if (om != null) { + om.stop(); + } + FileUtils.deleteDirectory(dir); + } + + @Test + public void sortDatanodesRelativeToDatanode() { + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + assertEquals(ROOT_LEVEL + 2, dn.getLevel()); + List sorted = + keyManager.sortDatanodes(nodeManager.getAllNodes(), nodeAddress(dn)); + assertEquals(dn, sorted.get(0), + "Source node should be sorted very first"); + assertRackOrder(dn.getNetworkLocation(), sorted); + } + } + + @Test + public void sortDatanodesRelativeToNonDatanode() { + for (Map.Entry entry : EDGE_NODES.entrySet()) { + assertRackOrder(entry.getValue(), + keyManager.sortDatanodes(nodeManager.getAllNodes(), entry.getKey())); + } + } + + @Test + public void testSortDatanodes() { + List nodes = nodeManager.getAllNodes(); + + // sort normal datanodes + String client; + client = nodeManager.getAllNodes().get(0).getIpAddress(); + List datanodeDetails = + keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + + // illegal client 1 + client += "X"; + datanodeDetails = keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + + // illegal client 2 + client = "/default-rack"; + datanodeDetails = keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + } + + private 
static void assertRackOrder(String rack, List list) { + int size = list.size(); + for (int i = 0; i < size / 2; i++) { + assertEquals(rack, list.get(i).getNetworkLocation(), + "Nodes in the same rack should be sorted first"); + } + for (int i = size / 2; i < size; i++) { + assertNotEquals(rack, list.get(i).getNetworkLocation(), + "Nodes in the other rack should be sorted last"); + } + } + + private String nodeAddress(DatanodeDetails dn) { + boolean useHostname = config.getBoolean( + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME, + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); + return useHostname ? dn.getHostName() : dn.getIpAddress(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 2b53f40dac38..8087d1f0e49d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -84,8 +84,6 @@ private void addPropertiesNotInXml() { configurationPropsToSkipCompare.addAll(Arrays.asList( HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA, HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT, - HddsConfigKeys.HDDS_KEY_ALGORITHM, - HddsConfigKeys.HDDS_SECURITY_PROVIDER, HddsConfigKeys.HDDS_X509_CRL_NAME, // HDDS-2873 HddsConfigKeys.HDDS_X509_GRACE_DURATION_TOKEN_CHECKS_ENABLED, OMConfigKeys.OZONE_OM_NODES_KEY, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 1be5b64ac87d..26ecb34c8866 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -50,10 +50,12 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; @@ -87,6 +89,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; @@ -121,21 +124,16 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; -import static org.apache.hadoop.net.ServerSocketUtil.getPort; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; @@ -170,6 +168,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -179,7 +178,6 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; @@ -209,6 +207,7 @@ final class TestSecureOzoneCluster { private File testUserKeytab; private String testUserPrincipal; private StorageContainerManager scm; + private ScmBlockLocationProtocol scmBlockClient; private OzoneManager om; private HddsProtos.OzoneManagerDetailsProto omInfo; private String host; @@ -227,19 +226,14 @@ void init() { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); - conf.setInt(OZONE_SCM_CLIENT_PORT_KEY, - getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100)); - conf.setInt(OZONE_SCM_DATANODE_PORT_KEY, - getPort(OZONE_SCM_DATANODE_PORT_DEFAULT, 100)); - conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY, - getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100)); - conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY, - getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100)); - // use the same base ports as MiniOzoneHACluster - conf.setInt(OZONE_SCM_RATIS_PORT_KEY, getPort(1200, 100)); - conf.setInt(OZONE_SCM_GRPC_PORT_KEY, getPort(1201, 100)); + conf.setInt(OZONE_SCM_CLIENT_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_DATANODE_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_RATIS_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_GRPC_PORT_KEY, getFreePort()); 
conf.set(OZONE_OM_ADDRESS_KEY, - InetAddress.getLocalHost().getCanonicalHostName() + ":1202"); + InetAddress.getLocalHost().getCanonicalHostName() + ":" + getFreePort()); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); DefaultMetricsSystem.setMiniClusterMode(true); @@ -265,6 +259,7 @@ void init() { clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); omId = UUID.randomUUID().toString(); + scmBlockClient = new ScmBlockLocationTestingClient(null, null, 0); startMiniKdc(); setSecureConfig(); @@ -277,7 +272,7 @@ void init() { } @AfterEach - void stop() { + void stop() throws Exception { try { stopMiniKdc(); if (scm != null) { @@ -610,6 +605,7 @@ void testAccessControlExceptionOnClient() throws Exception { setupOm(conf); om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); } catch (Exception ex) { // Expects timeout failure from scmClient in om but om user login via @@ -677,6 +673,7 @@ void testDelegationTokenRenewal() throws Exception { setupOm(conf); OzoneManager.setTestSecureOmFlag(true); om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); @@ -764,6 +761,7 @@ void testGetSetRevokeS3Secret() throws Exception { setupOm(conf); // Start OM om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String username = ugi.getUserName(); @@ -875,11 +873,16 @@ void testSecureOmReInit() throws Exception { assertThat(logOutput) .doesNotContain("Successfully stored SCM signed certificate"); + if (om.stop()) { + om.join(); + } + conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.setBoolean(OZONE_OM_S3_GPRC_SERVER_ENABLED, true); + conf.set(OZONE_OM_ADDRESS_KEY, + InetAddress.getLocalHost().getCanonicalHostName() + ":" + getFreePort()); OzoneManager.omInit(conf); - om.stop(); om = OzoneManager.createOm(conf); assertNotNull(om.getCertificateClient()); @@ -1000,6 +1003,7 @@ void testCertificateRotation() throws Exception { // create Ozone Manager instance, it will start the monitor task conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.setCertClient(client); // check after renew, client will have the new cert ID @@ -1165,6 +1169,7 @@ void testCertificateRotationUnRecoverableFailure() throws Exception { // create Ozone Manager instance, it will start the monitor task conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.setCertClient(mockClient); // check error message during renew @@ -1203,6 +1208,7 @@ void testDelegationTokenRenewCrossCertificateRenew() throws Exception { String omCertId1 = omCert.getSerialNumber().toString(); // Start OM om.setCertClient(certClient); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); GenericTestUtils.waitFor(() -> om.isLeaderReady(), 100, 10000); @@ -1346,27 +1352,16 @@ void testOMGrpcServerCertificateRenew() throws Exception { } // get new client, it should succeed. 
- try { - OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); - client1.close(); - } catch (Exception e) { - System.out.println("OzoneClientFactory.getRpcClient failed for " + - e.getMessage()); - fail("Create client should succeed for certificate is renewed"); - } + OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); + client1.close(); + // Wait for old OM certificate to expire GenericTestUtils.waitFor(() -> omCert.getNotAfter().before(new Date()), 500, certLifetime * 1000); // get new client, it should succeed too. - try { - OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); - client1.close(); - } catch (Exception e) { - System.out.println("OzoneClientFactory.getRpcClient failed for " + - e.getMessage()); - fail("Create client should succeed for certificate is renewed"); - } + OzoneClient client2 = OzoneClientFactory.getRpcClient(conf); + client2.close(); } finally { OzoneManager.setUgi(null); GrpcOmTransport.setCaCerts(null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java new file mode 100644 index 000000000000..8f79605ab051 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.conf.ConfigurationTarget; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; +import org.apache.hadoop.ozone.container.replication.ReplicationServer; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; + +/** + * Creates datanodes with similar configuration (same number of volumes, same layout version, etc.). 
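The factory described here replaces the old per-builder knobs setNumDataVolumes and setDatanodeReservedSpace. As a sketch (mirroring the updated TestMiniOzoneCluster hunk earlier in this patch, not new API), it is wired into the cluster builder like so:

    String reservedSpace = "1B";
    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(1)
        .setDatanodeFactory(UniformDatanodesFactory.newBuilder()
            .setNumDataVolumes(3)
            .setReservedSpace(reservedSpace)
            .build())
        .build();

For every datanode it creates, the factory provisions the requested data volumes and, when a reserved-space value is set, publishes it per volume as "<dir>:<size>" under HDDS_DATANODE_DIR_DU_RESERVED.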
+ */ +public class UniformDatanodesFactory implements MiniOzoneCluster.DatanodeFactory { + + private final AtomicInteger nodesCreated = new AtomicInteger(); + + private final int numDataVolumes; + private final String reservedSpace; + private final Integer layoutVersion; + + protected UniformDatanodesFactory(Builder builder) { + numDataVolumes = builder.numDataVolumes; + layoutVersion = builder.layoutVersion; + reservedSpace = builder.reservedSpace; + } + + @Override + public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException { + final int i = nodesCreated.incrementAndGet(); + final OzoneConfiguration dnConf = new OzoneConfiguration(conf); + + configureDatanodePorts(dnConf); + + Path baseDir = Paths.get(Objects.requireNonNull(conf.get(OZONE_METADATA_DIRS)), "datanode-" + i); + + Path metaDir = baseDir.resolve("meta"); + Files.createDirectories(metaDir); + dnConf.set(OZONE_METADATA_DIRS, metaDir.toString()); + + List dataDirs = new ArrayList<>(); + List reservedSpaceList = new ArrayList<>(); + for (int j = 0; j < numDataVolumes; j++) { + Path dir = baseDir.resolve("data-" + j); + Files.createDirectories(dir); + dataDirs.add(dir.toString()); + if (reservedSpace != null) { + reservedSpaceList.add(dir + ":" + reservedSpace); + } + } + String reservedSpaceString = String.join(",", reservedSpaceList); + String listOfDirs = String.join(",", dataDirs); + dnConf.set(DFS_DATANODE_DATA_DIR_KEY, listOfDirs); + dnConf.set(HDDS_DATANODE_DIR_KEY, listOfDirs); + dnConf.set(HDDS_DATANODE_DIR_DU_RESERVED, reservedSpaceString); + + Path ratisDir = baseDir.resolve("ratis"); + Files.createDirectories(ratisDir); + dnConf.set(HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); + + if (layoutVersion != null) { + DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage( + dnConf, UUID.randomUUID().toString(), layoutVersion); + layoutStorage.initialize(); + } + + return dnConf; + } + + private void configureDatanodePorts(ConfigurationTarget conf) { + conf.set(HDDS_REST_HTTP_ADDRESS_KEY, anyHostWithFreePort()); + conf.set(HDDS_DATANODE_HTTP_ADDRESS_KEY, anyHostWithFreePort()); + conf.set(HDDS_DATANODE_CLIENT_ADDRESS_KEY, anyHostWithFreePort()); + conf.setInt(HDDS_CONTAINER_IPC_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_IPC_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); + conf.setFromObject(new ReplicationServer.ReplicationConfig().setPort(getFreePort())); + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder for UniformDatanodesFactory. + */ + public static class Builder { + + private int numDataVolumes = 1; + private String reservedSpace; + private Integer layoutVersion; + + /** + * Sets the number of data volumes per datanode. + */ + public Builder setNumDataVolumes(int n) { + numDataVolumes = n; + return this; + } + + /** + * Sets the reserved space + * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys#HDDS_DATANODE_DIR_DU_RESERVED} + * for each volume in each datanode. + * @param reservedSpace String that contains the numeric size value and ends with a + * {@link org.apache.hadoop.hdds.conf.StorageUnit} suffix. For example, "50GB". 
+ * @see org.apache.hadoop.ozone.container.common.volume.VolumeInfo + */ + public Builder setReservedSpace(String reservedSpace) { + this.reservedSpace = reservedSpace; + return this; + } + + public Builder setLayoutVersion(int layoutVersion) { + this.layoutVersion = layoutVersion; + return this; + } + + public UniformDatanodesFactory build() { + return new UniformDatanodesFactory(this); + } + + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java index f2a079ca80ca..e2a15595b553 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.client; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -36,10 +37,10 @@ public class TestOzoneClientFactory { public void testRemoteException() { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); Exception e = assertThrows(Exception.class, () -> { MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setTotalPipelineNumLimit(10) .build(); String omPort = cluster.getOzoneManager().getRpcPort(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java index 0b0b2586c9e2..b40b0bbcc626 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; @@ -121,6 +122,7 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { TimeUnit.SECONDS); conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED, zeroCopyEnabled); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -129,8 +131,9 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { .setStreamBufferMaxSize(maxFlushSize) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) - .setTotalPipelineNumLimit(10).build(); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) + .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java index e7c8be170ca1..d7ce08338db8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -85,8 +84,6 @@ private void startCluster(OzoneConfiguration conf) throws Exception { blockSize = 2 * maxFlushSize; // Make sure the pipeline does not get destroyed quickly - conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, - 60, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 60000, TimeUnit.SECONDS); DatanodeRatisServerConfig ratisServerConfig = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index c0ae49f3bf41..d668bb4b6522 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -49,6 +49,7 @@ HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys. HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -84,10 +85,11 @@ public static void init() throws Exception { conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index 8bb791bb103e..90a3f1d68933 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -17,9 +17,11 @@ package org.apache.hadoop.ozone.client.rpc; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; @@ -28,6 +30,7 @@ import 
org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -45,6 +48,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -70,6 +74,7 @@ public class TestBlockDataStreamOutput { private static String volumeName; private static String bucketName; private static String keyString; + private static final int DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(); /** * Create a MiniDFSCluster for testing. @@ -105,7 +110,7 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) + .setDatanodeCurrentVersion(DN_OLD_VERSION) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key @@ -270,4 +275,25 @@ public void testTotalAckDataLength() throws Exception { assertEquals(dataLength, stream.getTotalAckDataLength()); } + @Test + public void testDatanodeVersion() throws Exception { + // Verify all DNs internally have versions set correctly + List dns = cluster.getHddsDatanodes(); + for (HddsDatanodeService dn : dns) { + DatanodeDetails details = dn.getDatanodeDetails(); + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + + String keyName = getKeyName(); + OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); + KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); + BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); + + // Now check 3 DNs in a random pipeline returns the correct DN versions + List streamDnDetails = stream.getPipeline().getNodes(); + for (DatanodeDetails details : streamDnDetails) { + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index 1e9cefbaa481..d02c7636ca01 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -22,6 +22,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -52,7 +53,8 @@ import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.PutBlock; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.WriteChunk; @@ -80,8 +82,13 @@ class TestBlockOutputStream { static MiniOzoneCluster createCluster() throws IOException, InterruptedException, TimeoutException { - 
OzoneConfiguration conf = new OzoneConfiguration(); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumType(ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + clientConfig.setEnablePutblockPiggybacking(true); + conf.setFromObject(clientConfig); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS); conf.setQuietMode(false); @@ -115,7 +122,6 @@ static MiniOzoneCluster createCluster() throws IOException, MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); @@ -143,11 +149,21 @@ void shutdown() { } } + private static Stream clientParameters() { + return Stream.of( + Arguments.of(true, true), + Arguments.of(true, false), + Arguments.of(false, true), + Arguments.of(false, false) + ); + } + static OzoneClientConfig newClientConfig(ConfigurationSource source, - boolean flushDelay) { + boolean flushDelay, boolean enablePiggybacking) { OzoneClientConfig clientConfig = source.getObject(OzoneClientConfig.class); clientConfig.setChecksumType(ChecksumType.NONE); clientConfig.setStreamBufferFlushDelay(flushDelay); + clientConfig.setEnablePutblockPiggybacking(enablePiggybacking); return clientConfig; } @@ -159,9 +175,9 @@ static OzoneClient newClient(OzoneConfiguration conf, } @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testWriteLessThanChunkSize(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + @MethodSource("clientParameters") + void testWriteLessThanChunkSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); @@ -254,9 +270,9 @@ void testWriteLessThanChunkSize(boolean flushDelay) throws Exception { } @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testWriteExactlyFlushSize(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + @MethodSource("clientParameters") + void testWriteExactlyFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); @@ -277,10 +293,16 @@ void testWriteExactlyFlushSize(boolean flushDelay) throws Exception { byte[] data1 = RandomUtils.nextBytes(dataLength); key.write(data1); - assertEquals(pendingWriteChunkCount + 2, - metrics.getPendingContainerOpCountMetrics(WriteChunk)); - assertEquals(pendingPutBlockCount + 1, - metrics.getPendingContainerOpCountMetrics(PutBlock)); + assertEquals(writeChunkCount + 2, + metrics.getContainerOpCountMetrics(WriteChunk)); + assertEquals(putBlockCount + 1, + metrics.getContainerOpCountMetrics(PutBlock)); + // The WriteChunk and PutBlock can be completed soon. 
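+        // The requests complete asynchronously, so some responses may already have arrived
+        // by the time these assertions run; the checks below therefore verify upper bounds
+        // rather than exact pending counts.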
+ assertThat(metrics.getPendingContainerOpCountMetrics(WriteChunk)) + .isLessThanOrEqualTo(pendingWriteChunkCount + 2); + assertThat(metrics.getPendingContainerOpCountMetrics(PutBlock)) + .isLessThanOrEqualTo(pendingPutBlockCount + 1); + KeyOutputStream keyOutputStream = assertInstanceOf(KeyOutputStream.class, key.getOutputStream()); assertEquals(1, keyOutputStream.getStreamEntries().size()); @@ -298,7 +320,10 @@ void testWriteExactlyFlushSize(boolean flushDelay) throws Exception { assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); assertEquals(0, blockOutputStream.getTotalAckDataLength()); - assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size()); + // Before flush, if there was no pending PutBlock which means it is complete. + // It put a commit index into commitIndexMap. + assertEquals((metrics.getPendingContainerOpCountMetrics(PutBlock) == pendingPutBlockCount) ? 1 : 0, + blockOutputStream.getCommitIndex2flushedDataMap().size()); // Now do a flush. key.flush(); @@ -306,10 +331,20 @@ void testWriteExactlyFlushSize(boolean flushDelay) throws Exception { assertEquals(1, keyOutputStream.getStreamEntries().size()); // The previously written data is equal to flushSize, so no action is // triggered when execute flush, if flushDelay is enabled. - assertEquals(pendingWriteChunkCount + (flushDelay ? 2 : 0), - metrics.getPendingContainerOpCountMetrics(WriteChunk)); - assertEquals(pendingPutBlockCount + (flushDelay ? 1 : 0), - metrics.getPendingContainerOpCountMetrics(PutBlock)); + // If flushDelay is disabled, it will call waitOnFlushFutures to wait all + // putBlocks finished. It was broken because WriteChunk and PutBlock + // can be complete regardless of whether the flush executed or not. + if (flushDelay) { + assertThat(metrics.getPendingContainerOpCountMetrics(WriteChunk)) + .isLessThanOrEqualTo(pendingWriteChunkCount + 2); + assertThat(metrics.getPendingContainerOpCountMetrics(PutBlock)) + .isLessThanOrEqualTo(pendingWriteChunkCount + 1); + } else { + assertEquals(pendingWriteChunkCount, + metrics.getPendingContainerOpCountMetrics(WriteChunk)); + assertEquals(pendingPutBlockCount, + metrics.getPendingContainerOpCountMetrics(PutBlock)); + } // Since the data in the buffer is already flushed, flush here will have // no impact on the counters and data structures @@ -321,7 +356,12 @@ void testWriteExactlyFlushSize(boolean flushDelay) throws Exception { blockOutputStream.getBufferPool().computeBufferData()); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); - assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size()); + // If the flushDelay feature is enabled, nothing happens. + // The assertions will be as same as those before flush. + // If it flushed, the Commit index will be removed. + assertEquals((flushDelay && + (metrics.getPendingContainerOpCountMetrics(PutBlock) == pendingPutBlockCount)) ? 1 : 0, + blockOutputStream.getCommitIndex2flushedDataMap().size()); assertEquals(flushDelay ? 
0 : dataLength, blockOutputStream.getTotalAckDataLength()); @@ -349,9 +389,9 @@ void testWriteExactlyFlushSize(boolean flushDelay) throws Exception { } @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testWriteMoreThanChunkSize(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + @MethodSource("clientParameters") + void testWriteMoreThanChunkSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); @@ -398,7 +438,7 @@ void testWriteMoreThanChunkSize(boolean flushDelay) throws Exception { key.flush(); assertEquals(writeChunkCount + 2, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 1, + assertEquals(putBlockCount + ((enablePiggybacking) ? 0 : 1), metrics.getContainerOpCountMetrics(PutBlock)); assertEquals(pendingWriteChunkCount, metrics.getPendingContainerOpCountMetrics(WriteChunk)); @@ -427,9 +467,9 @@ void testWriteMoreThanChunkSize(boolean flushDelay) throws Exception { metrics.getPendingContainerOpCountMetrics(PutBlock)); assertEquals(writeChunkCount + 2, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 2, + assertEquals(putBlockCount + ((enablePiggybacking) ? 1 : 2), metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + 4, metrics.getTotalOpCount()); + assertEquals(totalOpCount + ((enablePiggybacking) ? 3 : 4), metrics.getTotalOpCount()); assertEquals(0, keyOutputStream.getStreamEntries().size()); validateData(keyName, data1, client.getObjectStore(), VOLUME, BUCKET); @@ -437,9 +477,9 @@ void testWriteMoreThanChunkSize(boolean flushDelay) throws Exception { } @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testWriteMoreThanFlushSize(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + @MethodSource("clientParameters") + void testWriteMoreThanFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); @@ -478,13 +518,29 @@ void testWriteMoreThanFlushSize(boolean flushDelay) throws Exception { assertEquals(FLUSH_SIZE, blockOutputStream.getTotalDataFlushedLength()); assertEquals(0, blockOutputStream.getTotalAckDataLength()); - assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size()); + // Before flush, if there was no pending PutBlock which means it is complete. + // It put a commit index into commitIndexMap. + assertEquals((metrics.getPendingContainerOpCountMetrics(PutBlock) == pendingPutBlockCount) ? 1 : 0, + blockOutputStream.getCommitIndex2flushedDataMap().size()); - assertEquals(FLUSH_SIZE, blockOutputStream.getTotalDataFlushedLength()); - assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size()); + key.flush(); + if (flushDelay) { + // If the flushDelay feature is enabled, nothing happens. + // The assertions will be as same as those before flush. 
+ assertEquals(FLUSH_SIZE, blockOutputStream.getTotalDataFlushedLength()); + assertEquals((metrics.getPendingContainerOpCountMetrics(PutBlock) == pendingPutBlockCount) ? 1 : 0, + blockOutputStream.getCommitIndex2flushedDataMap().size()); + + assertEquals(0, blockOutputStream.getTotalAckDataLength()); + assertEquals(1, keyOutputStream.getStreamEntries().size()); + } else { + assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); + assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size()); + + assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); + assertEquals(1, keyOutputStream.getStreamEntries().size()); + } - assertEquals(0, blockOutputStream.getTotalAckDataLength()); - assertEquals(1, keyOutputStream.getStreamEntries().size()); key.close(); @@ -494,9 +550,11 @@ void testWriteMoreThanFlushSize(boolean flushDelay) throws Exception { metrics.getPendingContainerOpCountMetrics(PutBlock)); assertEquals(writeChunkCount + 3, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 2, + // If the flushDelay was disabled, it sends PutBlock with the data in the buffer. + assertEquals(putBlockCount + (flushDelay ? 2 : 3) - (enablePiggybacking ? 1 : 0), metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + 5, metrics.getTotalOpCount()); + assertEquals(totalOpCount + (flushDelay ? 5 : 6) - (enablePiggybacking ? 1 : 0), + metrics.getTotalOpCount()); assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); // make sure the bufferPool is empty assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); @@ -509,9 +567,9 @@ void testWriteMoreThanFlushSize(boolean flushDelay) throws Exception { } @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testWriteExactlyMaxFlushSize(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + @MethodSource("clientParameters") + void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); @@ -600,9 +658,9 @@ void testWriteExactlyMaxFlushSize(boolean flushDelay) throws Exception { } @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testWriteMoreThanMaxFlushSize(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + @MethodSource("clientParameters") + void testWriteMoreThanMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); @@ -687,9 +745,9 @@ void testWriteMoreThanMaxFlushSize(boolean flushDelay) throws Exception { assertEquals(writeChunkCount + 5, metrics.getContainerOpCountMetrics(WriteChunk)); // The previous flush did not trigger any action with flushDelay enabled - assertEquals(putBlockCount + (flushDelay ? 3 : 4), + assertEquals(putBlockCount + (flushDelay ? 2 : 3) + (enablePiggybacking ? 0 : 1), metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + (flushDelay ? 8 : 9), + assertEquals(totalOpCount + (flushDelay ? 
7 : 8) + ((enablePiggybacking ? 0 : 1)), metrics.getTotalOpCount()); assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 708f4cec04b8..8d69da3ef3e1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -56,7 +56,10 @@ import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.stream.Stream; /** * Tests failure detection and handling in BlockOutputStream Class. @@ -79,10 +82,19 @@ void shutdown() { } } + private static Stream clientParameters() { + return Stream.of( + Arguments.of(true, true), + Arguments.of(true, false), + Arguments.of(false, true), + Arguments.of(false, false) + ); + } + @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testContainerClose(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + @MethodSource("clientParameters") + void testContainerClose(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { testWatchForCommitWithCloseContainerException(client); testWatchForCommitWithSingleNodeRatis(client); @@ -174,10 +186,10 @@ private void testWatchForCommitWithCloseContainerException(OzoneClient client) } @ParameterizedTest - @ValueSource(booleans = {true, false}) + @MethodSource("clientParameters") @Flaky("HDDS-6113") - void testWatchForCommitDatanodeFailure(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + void testWatchForCommitDatanodeFailure(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { String keyName = getKeyName(); OzoneOutputStream key = createKey(client, keyName); @@ -259,9 +271,9 @@ void testWatchForCommitDatanodeFailure(boolean flushDelay) throws Exception { } @ParameterizedTest - @ValueSource(booleans = {true, false}) - void test2DatanodesFailure(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + @MethodSource("clientParameters") + void test2DatanodesFailure(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { String keyName = getKeyName(); OzoneOutputStream key = createKey(client, keyName); @@ -560,10 +572,10 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) } @ParameterizedTest - @ValueSource(booleans = 
{true, false}) + @MethodSource("clientParameters") @Flaky("HDDS-6113") - void testDatanodeFailureWithSingleNode(boolean flushDelay) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + void testDatanodeFailureWithSingleNode(boolean flushDelay, boolean enablePiggybacking) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { String keyName = getKeyName(); OzoneOutputStream key = @@ -650,10 +662,10 @@ void testDatanodeFailureWithSingleNode(boolean flushDelay) throws Exception { } @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testDatanodeFailureWithPreAllocation(boolean flushDelay) + @MethodSource("clientParameters") + void testDatanodeFailureWithPreAllocation(boolean flushDelay, boolean enablePiggybacking) throws Exception { - OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { String keyName = getKeyName(); OzoneOutputStream key = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index fe08b9e0f4ba..78a4e78647eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -59,6 +59,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; @@ -115,11 +116,12 @@ public static void init() throws Exception { replicationConf.setInterval(Duration.ofMillis(containerReportInterval)); conf.setFromObject(replicationConf); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 6); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(4) - .setTotalPipelineNumLimit(6).setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); cluster.getStorageContainerManager().getReplicationManager().start(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 84b7579cd01d..3f1c31edfe70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -53,6 +53,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -92,6 +93,7 @@ public void setup() throws Exception { baseDir.mkdirs(); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -101,7 +103,7 @@ public void setup() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); @@ -113,7 +115,6 @@ public void setup() throws Exception { // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) - .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index 1d0f25b3a041..eea068a8742f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -51,6 +51,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; @@ -94,6 +95,8 @@ public void setup() throws Exception { conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, TimeUnit.SECONDS); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000)); @@ -118,7 +121,6 @@ public void setup() throws Exception { conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 2c11177e5eaf..b6eaca8e80d0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -83,6 +83,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; @@ -143,6 +144,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); @@ -166,10 +168,10 @@ public static void init() throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(20)); conf.setFromObject(raftClientConfig); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10).setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); @@ -307,9 +309,9 @@ public void testContainerStateMachineFailures() throws Exception { // restart the hdds datanode, container should not in the regular set OzoneConfiguration config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + UUID.randomUUID(); - config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); ozoneContainer = cluster.getHddsDatanodes().get(index) @@ -371,9 +373,9 @@ public void testUnhealthyContainer() throws Exception { OzoneConfiguration config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + UUID.randomUUID(); - config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); // restart the hdds datanode and see if the container is listed in the // in the missing container set and not in the regular set diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index 23ab89b80c65..229059d84ad1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -56,6 +56,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; @@ -101,10 +102,11 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); @@ -119,7 +121,6 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) - .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java index 97a3047bfdb0..d48df574a94e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java @@ -50,6 +50,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -89,6 +90,7 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); @@ -121,13 +123,11 @@ public void setup() throws Exception { .setStreamBufferMaxSize(MAX_FLUSH_SIZE) .applyTo(conf); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); - cluster = - MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .setHbInterval(200) - .build(); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java new file mode 100644 index 000000000000..5e7d8a4b0525 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.client.rpc; + +import org.apache.hadoop.hdds.DatanodeVersion; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.io.BlockDataStreamOutputEntry; +import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; +import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; +import org.apache.hadoop.ozone.container.TestHelper; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.List; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Tests DatanodeVersion in client stream. + */ +@Timeout(120) +public class TestDatanodeVersion { + private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf = new OzoneConfiguration(); + private static OzoneClient client; + private static ObjectStore objectStore; + private static int chunkSize; + private static int flushSize; + private static int maxFlushSize; + private static int blockSize; + private static String volumeName; + private static String bucketName; + private static final int DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(); + + /** + * Create a MiniDFSCluster for testing. + *
    + * Ozone is made active by setting OZONE_ENABLED = true + */ + @BeforeAll + public static void init() throws Exception { + chunkSize = 100; + flushSize = 2 * chunkSize; + maxFlushSize = 2 * flushSize; + blockSize = 2 * maxFlushSize; + + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + conf.setFromObject(clientConfig); + + conf.setQuietMode(false); + conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) + .setStreamBufferMaxSize(maxFlushSize) + .setDataStreamBufferFlushSize(maxFlushSize) + .setDataStreamMinPacketSize(chunkSize) + .setDataStreamWindowSize(5 * chunkSize) + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .setDatanodeCurrentVersion(DN_OLD_VERSION) + .build(); + cluster.waitForClusterToBeReady(); + //the easiest way to create an open container is creating a key + client = OzoneClientFactory.getRpcClient(conf); + objectStore = client.getObjectStore(); + volumeName = "testblockoutputstream"; + bucketName = volumeName; + objectStore.createVolume(volumeName); + objectStore.getVolume(volumeName).createBucket(bucketName); + } + + /** + * Shutdown MiniDFSCluster. + */ + @AfterAll + public static void shutdown() { + IOUtils.closeQuietly(client); + if (cluster != null) { + cluster.shutdown(); + } + } + + static OzoneDataStreamOutput createKey(String keyName, ReplicationType type, long size) throws Exception { + return TestHelper.createStreamKey(keyName, type, size, objectStore, volumeName, bucketName); + } + + @Test + public void testStreamDatanodeVersion() throws Exception { + // Verify all DNs internally have versions set correctly + List dns = cluster.getHddsDatanodes(); + for (HddsDatanodeService dn : dns) { + DatanodeDetails details = dn.getDatanodeDetails(); + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + + String keyName = UUID.randomUUID().toString(); + OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); + KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); + BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); + + // Now check 3 DNs in a random pipeline returns the correct DN versions + List streamDnDetails = stream.getPipeline().getNodes(); + for (DatanodeDetails details : streamDnDetails) { + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index fa50dac64f7e..d4ff85736273 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -65,6 +65,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; @@ -101,15 +102,20 @@ public class TestDeleteWithInAdequateDN { */ @BeforeAll public static void init() throws Exception { + final int numOfDatanodes = 3; + conf = new OzoneConfiguration(); path = GenericTestUtils .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); File baseDir = new File(path); baseDir.mkdirs(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, + TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT); // Make the stale, dead and server failure timeout higher so that a dead // node is not detecte at SCM as well as the pipeline close action // never gets initiated early at Datanode in the test. @@ -156,12 +162,8 @@ public static void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setQuietMode(false); - int numOfDatanodes = 3; cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfDatanodes) - .setTotalPipelineNumLimit( - numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT) - .setHbInterval(100) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(THREE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index d1b20de88a86..5c0910ecdc2d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -111,11 +111,12 @@ private void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -139,7 +140,7 @@ private void init() throws Exception { Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10).setTotalPipelineNumLimit(15).build(); + .setNumDatanodes(10).build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index fadc06bd57bd..b4ad49a3ed5a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -107,11 +107,12 @@ private void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -140,7 +141,6 @@ private void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) - .setTotalPipelineNumLimit(15) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java index 4ccdd0e2d4b3..51ebf3fa0ccd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java @@ -44,6 +44,7 @@ import java.io.IOException; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -73,8 +74,9 @@ public class TestHybridPipelineOnDatanode { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3) - .setTotalPipelineNumLimit(5).build(); + .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index b7b52d389bc3..34f85d8e9922 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -105,14 +105,13 @@ private void startCluster(int datanodes) throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(datanodes) - .setTotalPipelineNumLimit(0) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 919654d82a9b..5288bcb3cf21 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -22,6 +22,7 @@ import java.net.URI; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.time.Instant; import java.util.ArrayList; @@ -34,10 +35,14 @@ import java.util.UUID; import com.google.common.cache.Cache; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -64,8 +69,10 @@ import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -79,6 +86,14 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.getContainerByID; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.verifyOnDiskData; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -88,6 +103,7 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -120,6 +136,7 @@ class TestOzoneAtRestEncryption { private static final int DEFAULT_CRYPTO_BUFFER_SIZE = 8 * 1024; // 8KB 
// (this is the default Crypto Buffer size as determined by the config // hadoop.security.crypto.buffer.size) + private static MessageDigest eTagProvider; @BeforeAll static void init() throws Exception { @@ -169,6 +186,12 @@ static void init() throws Exception { // create test key createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); + + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + conf.setInt(OZONE_REPLICATION, 1); } @AfterAll @@ -190,6 +213,14 @@ static void shutdown() throws IOException { } } + static void reInitClient() throws IOException { + ozClient = OzoneClientFactory.getRpcClient(conf); + store = ozClient.getObjectStore(); + TestOzoneRpcClient.setOzClient(ozClient); + TestOzoneRpcClient.setStore(store); + } + + @ParameterizedTest @EnumSource void testPutKeyWithEncryption(BucketLayout bucketLayout) throws Exception { @@ -206,6 +237,7 @@ void testPutKeyWithEncryption(BucketLayout bucketLayout) throws Exception { createAndVerifyKeyData(bucket); createAndVerifyStreamKeyData(bucket); + createAndVerifyFileSystemData(bucket); } @ParameterizedTest @@ -252,6 +284,38 @@ static void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { out.write(value.getBytes(StandardCharsets.UTF_8)); } verifyKeyData(bucket, keyName, value, testStartTime); + OzoneKeyDetails key1 = bucket.getKey(keyName); + + // Overwrite the key + try (OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(StandardCharsets.UTF_8).length, + ReplicationConfig.fromTypeAndFactor(RATIS, ONE), + new HashMap<>())) { + out.write(value.getBytes(StandardCharsets.UTF_8)); + } + OzoneKeyDetails key2 = bucket.getKey(keyName); + assertNotEquals(key1.getFileEncryptionInfo().toString(), key2.getFileEncryptionInfo().toString()); + } + + static void createAndVerifyFileSystemData( + OzoneBucket bucket) throws Exception { + // OBS does not support file system semantics. + if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) { + return; + } + Instant testStartTime = getTestStartTime(); + String keyName = UUID.randomUUID().toString(); + String value = "sample value"; + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + final Path file = new Path(dir, keyName); + try (FileSystem fs = FileSystem.get(conf)) { + try (FSDataOutputStream out = fs.create(file, true)) { + out.write(value.getBytes(StandardCharsets.UTF_8)); + } + } + verifyKeyData(bucket, keyName, value, testStartTime); } static void verifyKeyData(OzoneBucket bucket, String keyName, String value, @@ -272,7 +336,6 @@ static void verifyKeyData(OzoneBucket bucket, String keyName, String value, len = is.read(fileContent); } - assertEquals(len, value.length()); assertTrue(verifyRatisReplication(bucket.getVolumeName(), bucket.getName(), keyName, RATIS, @@ -280,6 +343,13 @@ static void verifyKeyData(OzoneBucket bucket, String keyName, String value, assertEquals(value, new String(fileContent, StandardCharsets.UTF_8)); assertFalse(key.getCreationTime().isBefore(testStartTime)); assertFalse(key.getModificationTime().isBefore(testStartTime)); + + long containerID = key.getOzoneKeyLocations().get(0) + .getContainerID(); + Container container = getContainerByID(cluster, containerID); + // the data stored on disk should not be the same as the input. 
+ assertFalse(verifyOnDiskData(cluster, container, key, value), + "On disk block is written in clear text!"); } private OzoneBucket createVolumeAndBucket(String volumeName, @@ -439,6 +509,18 @@ void mpuOnePart(BucketLayout bucketLayout) throws Exception { createVolumeAndBucket(volumeName, bucketName, bucketLayout), 1); } + @ParameterizedTest + @EnumSource + void mpuOnePartInvalidUploadID(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OMException e = assertThrows(OMException.class, () -> + testMultipartUploadWithEncryption( + createVolumeAndBucket(volumeName, bucketName, bucketLayout), 1, false, true) + ); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, e.getResult()); + } + @ParameterizedTest @EnumSource void mpuTwoParts(BucketLayout bucketLayout) throws Exception { @@ -516,12 +598,21 @@ private void testMultipartUploadWithEncryption(OzoneBucket bucket, private void testMultipartUploadWithEncryption(OzoneBucket bucket, int numParts, boolean isStream) throws Exception { + testMultipartUploadWithEncryption(bucket, numParts, isStream, false); + } + + private void testMultipartUploadWithEncryption(OzoneBucket bucket, + int numParts, boolean isStream, boolean invalidUploadID) throws Exception { String keyName = "mpu_test_key_" + numParts; // Initiate multipart upload String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationConfig.fromTypeAndFactor(RATIS, ONE)); + if (invalidUploadID) { + uploadID += "random1234"; + } + // Upload Parts Map partsMap = new TreeMap<>(); List partsData = new ArrayList<>(); @@ -631,14 +722,17 @@ private String uploadStreamPart(OzoneBucket bucket, String keyName, ByteBuffer dataBuffer = ByteBuffer.wrap(data); multipartStreamKey.write(dataBuffer, 0, length); + multipartStreamKey.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); multipartStreamKey.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = multipartStreamKey.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private String uploadPart(OzoneBucket bucket, String keyName, @@ -646,14 +740,17 @@ private String uploadPart(OzoneBucket bucket, String keyName, OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private void completeMultipartUpload(OzoneBucket bucket, String keyName, @@ -691,9 +788,7 @@ void testGetKeyProvider() throws Exception { KeyProvider kp3 = ozClient.getObjectStore().getKeyProvider(); assertNotEquals(kp3, kpSpy); - // Restore ozClient and store - 
TestOzoneRpcClient.setOzClient(OzoneClientFactory.getRpcClient(conf)); - TestOzoneRpcClient.setStore(ozClient.getObjectStore()); + reInitClient(); } private static RepeatedOmKeyInfo getMatchedKeyInfo( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index a89e61769966..b943930f62f3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -17,8 +17,14 @@ package org.apache.hadoop.ozone.client.rpc; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.HashMap; + +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -44,13 +50,13 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.StringUtils.string2Bytes; @@ -78,6 +84,7 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -97,6 +104,7 @@ public class TestOzoneClientMultipartUploadWithFSO { private static ObjectStore store = null; private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; + private static MessageDigest eTagProvider; private String volumeName; private String bucketName; @@ -114,8 +122,10 @@ public class TestOzoneClientMultipartUploadWithFSO { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } /** @@ -135,7 +145,6 @@ public static void shutdown() throws IOException { static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); ozClient = 
OzoneClientFactory.getRpcClient(conf); @@ -187,6 +196,9 @@ public void testInitiateMultipartUploadWithDefaultReplication() throws assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(keyName, multipartInfo.getKeyName()); assertNotNull(multipartInfo.getUploadID()); // Call initiate multipart upload for the same key again, this should @@ -194,6 +206,9 @@ public void testInitiateMultipartUploadWithDefaultReplication() throws multipartInfo = bucket.initiateMultipartUpload(keyName); assertNotNull(multipartInfo); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(keyName, multipartInfo.getKeyName()); assertNotEquals(multipartInfo.getUploadID(), uploadID); assertNotNull(multipartInfo.getUploadID()); } @@ -207,13 +222,14 @@ public void testUploadPartWithNoOverride() throws IOException { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); } @Test @@ -223,12 +239,12 @@ public void testUploadPartOverrideWithRatis() throws Exception { ReplicationType.RATIS, THREE); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - sampleData.getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, sampleData.getBytes(UTF_8)); //Overwrite the part by creating part key with same part number. - String partNameNew = uploadPart(bucket, keyName, uploadID, partNumber, - "name".getBytes(UTF_8)); + Pair partNameAndETagNew = uploadPart(bucket, keyName, + uploadID, partNumber, "name".getBytes(UTF_8)); // PartName should be same from old part Name. // AWS S3 for same content generates same partName during upload part. @@ -238,7 +254,10 @@ public void testUploadPartOverrideWithRatis() throws Exception { // So, when a part is override partNames will still be same irrespective // of content in ozone s3. This will make S3 Mpu completeMPU pass when // comparing part names and large file uploads work using aws cp. 
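
For reviewers tracking the partName-to-ETag switch in these hunks: a minimal sketch of the per-part ETag convention the updated tests follow, assuming an existing OzoneBucket and an already-initiated upload ID from the surrounding test; the helper name uploadPartWithETag is illustrative, not part of the patch. The ETag is the lowercase MD5 hex of the part bytes and has to be placed in the stream metadata under OzoneConsts.ETAG before close() commits the part.

    import java.security.MessageDigest;
    import javax.xml.bind.DatatypeConverter;
    import org.apache.hadoop.ozone.OzoneConsts;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    // Illustrative helper: upload one part and return the ETag the OM commits for it.
    static String uploadPartWithETag(OzoneBucket bucket, String keyName,
        String uploadID, int partNumber, byte[] data) throws Exception {
      MessageDigest md5 = MessageDigest.getInstance(OzoneConsts.MD5_HASH);
      String eTag = DatatypeConverter.printHexBinary(md5.digest(data)).toLowerCase();

      OzoneOutputStream out = bucket.createMultipartKey(
          keyName, data.length, partNumber, uploadID);
      out.write(data, 0, data.length);
      // Attach the ETag before close(); close() is what commits the part to the OM.
      out.getMetadata().put(OzoneConsts.ETAG, eTag);
      out.close();
      return out.getCommitUploadPartInfo().getETag();
    }
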
- assertEquals(partName, partNameNew, "Part names should be same"); + assertEquals(partNameAndETag.getKey(), partNameAndETagNew.getKey()); + + // ETags are not equal due to content differences + assertNotEquals(partNameAndETag.getValue(), partNameAndETagNew.getValue()); // old part bytes written needs discard and have only // new part bytes in quota for this bucket @@ -248,7 +267,8 @@ public void testUploadPartOverrideWithRatis() throws Exception { } @Test - public void testUploadTwiceWithEC() throws IOException { + public void testUploadTwiceWithEC() + throws IOException, NoSuchAlgorithmException { bucketName = UUID.randomUUID().toString(); bucket = getOzoneECBucket(bucketName); @@ -259,12 +279,12 @@ public void testUploadTwiceWithEC() throws IOException { String uploadID = multipartInfo.getUploadID(); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - data); - - Map partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, data); + + Map eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); long replicatedSize = QuotaUtil.getReplicatedSize(data.length, bucket.getReplicationConfig()); @@ -275,12 +295,12 @@ public void testUploadTwiceWithEC() throws IOException { multipartInfo = bucket.initiateMultipartUpload(keyName); uploadID = multipartInfo.getUploadID(); - partName = uploadPart(bucket, keyName, uploadID, partNumber, + partNameAndETag = uploadPart(bucket, keyName, uploadID, partNumber, data); - partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); // used sized should remain same, overwrite previous upload assertEquals(volume.getBucket(bucketName).getUsedBytes(), @@ -288,7 +308,8 @@ public void testUploadTwiceWithEC() throws IOException { } @Test - public void testUploadAbortWithEC() throws IOException { + public void testUploadAbortWithEC() + throws IOException, NoSuchAlgorithmException { byte[] data = generateData(81920, (byte) 97); bucketName = UUID.randomUUID().toString(); @@ -331,19 +352,19 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { ONE); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); + eTagsMap.put(1, partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, - "data".getBytes(UTF_8)); - partsMap.put(2, partName); + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, + "data".getBytes(UTF_8)); + eTagsMap.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -354,22 +375,24 @@ public void testMultipartUploadWithDiscardedUnusedPartSize() byte[] data = generateData(10000000, (byte) 97); // Upload 
Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); - // Upload part 1 and add it to the partsMap for completing the upload. - String partName1 = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName1); + // Upload part 1 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, data); + eTagsMap.put(1, partNameAndETag1.getValue()); - // Upload part 2 and add it to the partsMap for completing the upload. - String partName2 = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName2); + // Upload part 2 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, data); + eTagsMap.put(2, partNameAndETag2.getValue()); - // Upload part 3 but do not add it to the partsMap. + // Upload part 3 but do not add it to the eTagsMap. uploadPart(bucket, keyName, uploadID, 3, data); - completeMultipartUpload(bucket, keyName, uploadID, partsMap); + completeMultipartUpload(bucket, keyName, uploadID, eTagsMap); - // Check the bucket size. Since part number 3 was not added to the partsMap, + // Check the bucket size. Since part number 3 was not added to the eTagsMap, // the unused part size should be discarded from the bucket size, // 30000000 - 10000000 = 20000000 long bucketSize = volume.getBucket(bucketName).getUsedBytes(); @@ -456,6 +479,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -464,10 +490,13 @@ public void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
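
A condensed sketch of the ETag-keyed completion flow that the converted tests here exercise (eTagsMap replacing partsMap): parts are committed with their MD5 ETags and completeMultipartUpload is fed a part-number-to-ETag map. It assumes a bucket from the surrounding test and a part-upload helper like the uploadPartWithETag sketch above; part contents are placeholders.

    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
    import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;

    // Illustrative flow: complete a two-part upload keyed by part number -> ETag.
    // Every part except the last must meet OM_MULTIPART_MIN_SIZE, otherwise
    // completion fails with ENTITY_TOO_SMALL (as the min-size test above expects).
    static void multipartUploadWithETags(OzoneBucket bucket, String keyName,
        byte[] part1, byte[] part2) throws Exception {
      OmMultipartInfo info = bucket.initiateMultipartUpload(keyName);
      String uploadID = info.getUploadID();

      Map<Integer, String> eTagsMap = new TreeMap<>();
      eTagsMap.put(1, uploadPartWithETag(bucket, keyName, uploadID, 1, part1));
      eTagsMap.put(2, uploadPartWithETag(bucket, keyName, uploadID, 2, part2));

      OmMultipartUploadCompleteInfo completed =
          bucket.completeMultipartUpload(keyName, uploadID, eTagsMap);
      // The OM echoes back the committed volume/bucket/key plus an aggregate hash.
      assert completed.getHash() != null;
    }
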
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, uploadID); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -540,12 +569,13 @@ public void testAbortUploadSuccessWithParts() throws Exception { String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); OMMetadataManager metadataMgr = cluster.getOzoneManager().getMetadataManager(); - String multipartKey = verifyUploadedPart(uploadID, partName, metadataMgr); + String multipartKey = verifyUploadedPart(uploadID, partNameAndETag.getKey(), + metadataMgr); bucket.abortMultipartUpload(keyName, uploadID); @@ -571,17 +601,17 @@ public void testListMultipartUploadParts() throws Exception { Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -639,7 +669,6 @@ private void verifyPartNamesInDB(Map partsMap, listPartNames.remove(partKeyName); } - assertThat(listPartNames).withFailMessage("Wrong partKeyName format in DB!").isEmpty(); } @@ -661,17 +690,17 @@ public void testListMultipartUploadPartsWithContinuation() Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = 
uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -733,8 +762,8 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() bucket.listParts(keyName, uploadID, 100, 2); // Should return empty - assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); + assertEquals( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), ozoneMultipartUploadPartListParts.getReplicationConfig()); @@ -845,7 +874,8 @@ private String verifyUploadedPart(String uploadID, String partName, assertNotNull(omKeyInfo); assertNotNull(omMultipartKeyInfo); - assertEquals(OzoneFSUtils.getFileName(keyName), omKeyInfo.getKeyName()); + assertEquals(keyName, omKeyInfo.getKeyName()); + assertEquals(OzoneFSUtils.getFileName(keyName), omKeyInfo.getFileName()); assertEquals(uploadID, omMultipartKeyInfo.getUploadID()); for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo : @@ -869,27 +899,37 @@ private String initiateMultipartUpload(OzoneBucket oBucket, String kName, assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(kName, multipartInfo.getKeyName()); assertNotNull(multipartInfo.getUploadID()); return uploadID; } - private String uploadPart(OzoneBucket oBucket, String kName, String - uploadID, int partNumber, byte[] data) throws IOException { + private Pair uploadPart(OzoneBucket oBucket, String kName, + String uploadID, int partNumber, + byte[] data) + throws IOException, NoSuchAlgorithmException { OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName, data.length, partNumber, uploadID); - ozoneOutputStream.write(data, 0, - data.length); + ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } private void completeMultipartUpload(OzoneBucket oBucket, String kName, @@ -898,6 +938,11 @@ private void completeMultipartUpload(OzoneBucket oBucket, String kName, .completeMultipartUpload(kName, uploadID, partsMap); assertNotNull(omMultipartUploadCompleteInfo); + assertEquals(omMultipartUploadCompleteInfo.getBucket(), oBucket + .getName()); + assertEquals(omMultipartUploadCompleteInfo.getVolume(), oBucket + .getVolumeName()); + assertEquals(omMultipartUploadCompleteInfo.getKey(), kName); assertNotNull(omMultipartUploadCompleteInfo.getHash()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java index c3e8a8d461b8..ad59621e0c75 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java @@ -109,7 +109,6 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index cd99382f300b..3e1667a38a68 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -118,7 +118,6 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index a87d05321e27..d96d8d0cae29 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -17,14 +17,15 @@ package org.apache.hadoop.ozone.client.rpc; -import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; @@ -38,6 +39,9 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec; @@ -91,11 +95,11 @@ import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.S3SecretManager; import 
org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; @@ -113,22 +117,26 @@ import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.S3SecretValue; +import org.apache.hadoop.ozone.om.protocol.S3Auth; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import static java.nio.charset.StandardCharsets.UTF_8; -import org.apache.commons.io.FileUtils; + import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OmUtils.LOG; import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; @@ -136,7 +144,11 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.DEFAULT_OM_UPDATE_ID; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.corruptData; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.getContainerByID; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; @@ -159,7 +171,7 @@ import static org.junit.jupiter.api.Assertions.fail; import static org.slf4j.event.Level.DEBUG; -import org.apache.ozone.test.tag.Unhealthy; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; @@ -186,13 +198,19 @@ public abstract class TestOzoneRpcClientAbstract { private static String remoteUserName = "remoteUser"; private static String remoteGroupName = "remoteGroup"; private static OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); private static OzoneAcl defaultGroupAcl = new OzoneAcl(GROUP, remoteGroupName, - READ, DEFAULT); + DEFAULT, READ); private static OzoneAcl inheritedUserAcl = new OzoneAcl(USER, remoteUserName, - READ, ACCESS); + ACCESS, READ); private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, - remoteGroupName, READ, ACCESS); + remoteGroupName, ACCESS, READ); + private static MessageDigest 
eTagProvider; + + @BeforeAll + public static void initialize() throws NoSuchAlgorithmException { + eTagProvider = MessageDigest.getInstance(MD5_HASH); + } /** * Create a MiniOzoneCluster for testing. @@ -203,6 +221,7 @@ static void startCluster(OzoneConfiguration conf) throws Exception { // Reduce long wait time in MiniOzoneClusterImpl#waitForHddsDatanodesStop // for testZReadKeyWithUnhealthyContainerReplica. conf.set("ozone.scm.stale.node.interval", "10s"); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.MB) .setDataStreamMinPacketSize(1) @@ -210,7 +229,6 @@ static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(14) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); @@ -343,6 +361,65 @@ public void testBucketSetOwner() throws IOException { store.deleteVolume(volumeName); } + @Test void testKeyOwner() throws IOException { + // Save the old user, and switch to the old user after test + UserGroupInformation oldUser = UserGroupInformation.getCurrentUser(); + try { + // user1 create a key key1 + // user1 create a key key2 + UserGroupInformation user1 = UserGroupInformation + .createUserForTesting("user1", new String[] {"user1"}); + UserGroupInformation user2 = UserGroupInformation + .createUserForTesting("user2", new String[] {"user2"}); + String key1 = "key1"; + String key2 = "key2"; + String content = "1234567890"; + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + store.createVolume(volumeName); + store.getVolume(volumeName).createBucket(bucketName); + OzoneObj volumeObj = OzoneObjInfo.Builder.newBuilder() + .setVolumeName(volumeName).setStoreType(OzoneObj.StoreType.OZONE) + .setResType(OzoneObj.ResourceType.VOLUME).build(); + OzoneObj bucketObj = OzoneObjInfo.Builder.newBuilder() + .setVolumeName(volumeName).setBucketName(bucketName) + .setStoreType(OzoneObj.StoreType.OZONE) + .setResType(OzoneObj.ResourceType.BUCKET).build(); + store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user2", ACCESS, ACLType.ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user2", ACCESS, ACLType.ALL)); + + createKeyForUser(volumeName, bucketName, key1, content, user1); + createKeyForUser(volumeName, bucketName, key2, content, user2); + UserGroupInformation.setLoginUser(oldUser); + setOzClient(OzoneClientFactory.getRpcClient(cluster.getConf())); + setStore(ozClient.getObjectStore()); + OzoneBucket bucket = store.getVolume(volumeName).getBucket(bucketName); + assertNotNull(bucket.getKey(key1)); + assertNotNull(bucket.getKey(key2)); + assertEquals(user1.getShortUserName(), + bucket.getKey(key1).getOwner()); + assertEquals(user2.getShortUserName(), + bucket.getKey(key2).getOwner()); + } finally { + UserGroupInformation.setLoginUser(oldUser); + setOzClient(OzoneClientFactory.getRpcClient(cluster.getConf())); + setStore(ozClient.getObjectStore()); + } + + } + + private void createKeyForUser(String volumeName, String bucketName, + String keyName, String keyContent, UserGroupInformation user) + throws IOException { + UserGroupInformation.setLoginUser(user); + setOzClient(OzoneClientFactory.getRpcClient(cluster.getConf())); + setStore(ozClient.getObjectStore()); + OzoneBucket bucket = 
store.getVolume(volumeName).getBucket(bucketName); + createTestKey(bucket, keyName, keyContent); + } + @Test public void testSetAndClrQuota() throws Exception { String volumeName = UUID.randomUUID().toString(); @@ -659,13 +736,11 @@ public void testCreateBucketWithAcls() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - READ, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); + ACCESS, READ); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); assertEquals(bucketName, bucket.getName()); @@ -695,16 +770,14 @@ public void testCreateBucketWithAllArgument() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); + ACCESS, ACLType.ALL); ReplicationConfig repConfig = new ECReplicationConfig(3, 2); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder(); builder.setVersioning(true) .setStorageType(StorageType.SSD) - .setAcls(acls) + .addAcl(userAcl) .setDefaultReplicationConfig(new DefaultReplicationConfig(repConfig)); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); @@ -736,7 +809,7 @@ public void testAddBucketAcl() OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); List acls = new ArrayList<>(); - acls.add(new OzoneAcl(USER, "test", ACLType.ALL, ACCESS)); + acls.add(new OzoneAcl(USER, "test", ACCESS, ACLType.ALL)); OzoneBucket bucket = volume.getBucket(bucketName); for (OzoneAcl acl : acls) { assertTrue(bucket.addAcl(acl)); @@ -752,21 +825,17 @@ public void testRemoveBucketAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); + ACCESS, ACLType.ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); - for (OzoneAcl acl : acls) { - assertTrue(bucket.removeAcl(acl)); - } + assertTrue(bucket.removeAcl(userAcl)); OzoneBucket newBucket = volume.getBucket(bucketName); assertEquals(bucketName, newBucket.getName()); - assertThat(bucket.getAcls()).doesNotContain(acls.get(0)); + assertThat(newBucket.getAcls()).doesNotContain(userAcl); } @Test @@ -775,15 +844,14 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); - acls.add(new OzoneAcl(USER, "test1", - ACLType.ALL, ACCESS)); + ACCESS, ACLType.ALL); + OzoneAcl acl2 = new OzoneAcl(USER, "test1", + ACCESS, ACLType.ALL); store.createVolume(volumeName); 
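
On the OzoneAcl call-site churn through this file: the constructor argument order is now (type, name, scope, rights...), with the scope ahead of the ACL rights, and BucketArgs.Builder gains addAcl(...) in place of the old setAcls(list) pattern. A small sketch under those assumptions, reusing the static imports the test already relies on (USER, ACCESS, READ); the bucket name is a placeholder.

    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.client.BucketArgs;
    import org.apache.hadoop.ozone.client.OzoneVolume;
    import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

    // Illustrative: grant "test" READ and "test1" ALL on a new bucket with the reordered constructor.
    static void createBucketWithAcls(OzoneVolume volume) throws Exception {
      OzoneAcl readAcl = new OzoneAcl(USER, "test", ACCESS, READ);
      OzoneAcl allAcl = new OzoneAcl(USER, "test1", ACCESS, ACLType.ALL);

      BucketArgs args = BucketArgs.newBuilder()
          .addAcl(readAcl)
          .addAcl(allAcl)
          .build();
      volume.createBucket("bucket-with-acls", args);
    }
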
OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl) + .addAcl(acl2); volume.createBucket(bucketName, builder.build()); OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() .setBucketName(bucketName) @@ -792,13 +860,11 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() .setResType(OzoneObj.ResourceType.BUCKET).build(); // Remove the 2nd acl added to the list. - boolean remove = store.removeAcl(ozoneObj, acls.get(1)); - assertTrue(remove); - assertThat(store.getAcl(ozoneObj)).doesNotContain(acls.get(1)); + assertTrue(store.removeAcl(ozoneObj, acl2)); + assertThat(store.getAcl(ozoneObj)).doesNotContain(acl2); - remove = store.removeAcl(ozoneObj, acls.get(0)); - assertTrue(remove); - assertThat(store.getAcl(ozoneObj)).doesNotContain(acls.get(0)); + assertTrue(store.removeAcl(ozoneObj, userAcl)); + assertThat(store.getAcl(ozoneObj)).doesNotContain(userAcl); } @Test @@ -1329,7 +1395,6 @@ public void testMissingParentBucketUsedNamespace(BucketLayout layout) if (layout.equals(BucketLayout.LEGACY)) { OzoneConfiguration conf = cluster.getConf(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - cluster.setConf(conf); } // the directory "/dir1", ""/dir1/dir2/", "/dir1/dir2/dir3/" @@ -1447,8 +1512,17 @@ private void writeKey(OzoneBucket bucket, String keyName, ReplicationFactor replication, String value, int valueLength) throws IOException { - OzoneOutputStream out = bucket.createKey(keyName, valueLength, RATIS, - replication, new HashMap<>()); + writeKey(bucket, keyName, replication, value, valueLength, + Collections.emptyMap(), Collections.emptyMap()); + } + + private void writeKey(OzoneBucket bucket, String keyName, + ReplicationFactor replication, String value, + int valueLength, Map customMetadata, + Map tags) + throws IOException { + OzoneOutputStream out = bucket.createKey(keyName, valueLength, + ReplicationConfig.fromTypeAndFactor(RATIS, replication), customMetadata, tags); out.write(value.getBytes(UTF_8)); out.close(); } @@ -1491,6 +1565,7 @@ public void testUsedBytesWithUploadPart() throws IOException { sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); assertEquals(valueLength, store.getVolume(volumeName) @@ -1646,6 +1721,7 @@ public void testPutKeyRatisThreeNodesParallel() throws IOException, } latch.countDown(); } catch (IOException ex) { + LOG.error("Execution failed: ", ex); latch.countDown(); failCount.incrementAndGet(); } @@ -1717,16 +1793,9 @@ private void createAndCorruptKey(String volumeName, String bucketName, // Get the container by traversing the datanodes. Atleast one of the // datanode must have this container. 
- Container container = null; - for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { - container = hddsDatanode.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); - if (container != null) { - break; - } - } + Container container = getContainerByID(cluster, containerID); assertNotNull(container, "Container not found"); - corruptData(container, key); + corruptData(cluster, container, key); } @@ -1889,7 +1958,7 @@ public void testReadKeyWithCorruptedData() throws IOException { } } assertNotNull(container, "Container not found"); - corruptData(container, key); + corruptData(cluster, container, key); // Try reading the key. Since the chunk file is corrupted, it should // throw a checksum mismatch exception. @@ -2044,7 +2113,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { } } assertThat(containerList).withFailMessage("Container not found").isNotEmpty(); - corruptData(containerList.get(0), key); + corruptData(cluster, containerList.get(0), key); // Try reading the key. Read will fail on the first node and will eventually // failover to next replica try (OzoneInputStream is = bucket.readKey(keyName)) { @@ -2052,7 +2121,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { is.read(b); assertArrayEquals(b, data); } - corruptData(containerList.get(1), key); + corruptData(cluster, containerList.get(1), key); // Try reading the key. Read will fail on the first node and will eventually // failover to next replica try (OzoneInputStream is = bucket.readKey(keyName)) { @@ -2060,7 +2129,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { is.read(b); assertArrayEquals(b, data); } - corruptData(containerList.get(2), key); + corruptData(cluster, containerList.get(2), key); // Try reading the key. Read will fail here as all the replicas are corrupt IOException ioException = assertThrows(IOException.class, () -> { @@ -2072,43 +2141,6 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { assertThat(ioException).hasMessageContaining("Checksum mismatch"); } - private void corruptData(Container container, OzoneKey key) - throws IOException { - long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getContainerID(); - long localID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getLocalID(); - // From the containerData, get the block iterator for all the blocks in - // the container. - KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf()); - BlockIterator keyValueBlockIterator = - db.getStore().getBlockIterator(containerID)) { - // Find the block corresponding to the key we put. We use the localID of - // the BlockData to identify out key. 
- BlockData blockData = null; - while (keyValueBlockIterator.hasNext()) { - blockData = keyValueBlockIterator.nextBlock(); - if (blockData.getBlockID().getLocalID() == localID) { - break; - } - } - assertNotNull(blockData, "Block not found"); - - // Get the location of the chunk file - String containreBaseDir = - container.getContainerData().getVolume().getHddsRootDir().getPath(); - File chunksLocationPath = KeyValueContainerLocationUtil - .getChunksLocationPath(containreBaseDir, cluster.getClusterId(), containerID); - byte[] corruptData = "corrupted data".getBytes(UTF_8); - // Corrupt the contents of chunk files - for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { - FileUtils.writeByteArrayToFile(file, corruptData); - } - } - } - @Test public void testDeleteKey() throws Exception { @@ -2547,6 +2579,46 @@ public void testListKeyOnEmptyBucket() } } + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testCreateKeyWithMetadataAndTags(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + String value = "sample value"; + OzoneVolume volume = null; + store.createVolume(volumeName); + + volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + + OzoneBucket ozoneBucket = volume.getBucket(bucketName); + + Map customMetadata = new HashMap<>(); + customMetadata.put("custom-key1", "custom-value1"); + customMetadata.put("custom-key2", "custom-value2"); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + + writeKey(ozoneBucket, keyName, ONE, value, value.length(), customMetadata, tags); + + OzoneKeyDetails keyDetails = ozoneBucket.getKey(keyName); + + Map keyMetadata = keyDetails.getMetadata(); + + Map keyTags = keyDetails.getTags(); + + assertThat(keyMetadata).containsAllEntriesOf(customMetadata); + assertThat(keyMetadata).doesNotContainKeys("tag-key1", "tag-key2"); + + assertThat(keyTags).containsAllEntriesOf(keyTags); + assertThat(keyTags).doesNotContainKeys("custom-key1", "custom-key2"); + } + static Stream replicationConfigs() { return Stream.of( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), @@ -2637,13 +2709,14 @@ void testUploadPartWithNoOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); } @ParameterizedTest @@ -2671,6 +2744,7 @@ void testUploadPartOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -2678,7 +2752,7 @@ void 
testUploadPartOverride(ReplicationConfig replication) assertNotNull(commitUploadPartInfo); String partName = commitUploadPartInfo.getPartName(); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // Overwrite the part by creating part key with same part number // and different content. @@ -2686,13 +2760,14 @@ void testUploadPartOverride(ReplicationConfig replication) ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // AWS S3 for same content generates same partName during upload part. // In AWS S3 ETag is generated from md5sum. In Ozone right now we @@ -2739,10 +2814,10 @@ public void testMultipartUploadWithACL() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Add ACL on Bucket - OzoneAcl acl1 = new OzoneAcl(USER, "Monday", ACLType.ALL, DEFAULT); - OzoneAcl acl2 = new OzoneAcl(USER, "Friday", ACLType.ALL, DEFAULT); - OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACLType.ALL, ACCESS); - OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACLType.ALL, ACCESS); + OzoneAcl acl1 = new OzoneAcl(USER, "Monday", DEFAULT, ACLType.ALL); + OzoneAcl acl2 = new OzoneAcl(USER, "Friday", DEFAULT, ACLType.ALL); + OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACCESS, ACLType.ALL); + OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACCESS, ACLType.ALL); bucket.addAcl(acl1); bucket.addAcl(acl2); bucket.addAcl(acl3); @@ -2776,8 +2851,8 @@ public void testMultipartUploadWithACL() throws Exception { try (OzoneClient client = remoteUser.doAs((PrivilegedExceptionAction) () -> OzoneClientFactory.getRpcClient(cluster.getConf()))) { - OzoneAcl acl5 = new OzoneAcl(USER, userName, ACLType.READ, DEFAULT); - OzoneAcl acl6 = new OzoneAcl(USER, userName, ACLType.READ, ACCESS); + OzoneAcl acl5 = new OzoneAcl(USER, userName, DEFAULT, ACLType.READ); + OzoneAcl acl6 = new OzoneAcl(USER, userName, ACCESS, ACLType.READ); OzoneObj volumeObj = OzoneObjInfo.Builder.newBuilder() .setVolumeName(volumeName).setStoreType(OzoneObj.StoreType.OZONE) .setResType(OzoneObj.ResourceType.VOLUME).build(); @@ -2800,10 +2875,10 @@ public void testMultipartUploadWithACL() throws Exception { assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult()); // Add create permission for user, and try multi-upload init again - OzoneAcl acl7 = new OzoneAcl(USER, userName, ACLType.CREATE, DEFAULT); - OzoneAcl acl8 = new OzoneAcl(USER, userName, ACLType.CREATE, ACCESS); - OzoneAcl acl9 = new OzoneAcl(USER, userName, WRITE, DEFAULT); - OzoneAcl acl10 = new OzoneAcl(USER, userName, WRITE, ACCESS); + OzoneAcl acl7 = new OzoneAcl(USER, userName, DEFAULT, ACLType.CREATE); + OzoneAcl acl8 = new OzoneAcl(USER, userName, ACCESS, ACLType.CREATE); + OzoneAcl acl9 = new OzoneAcl(USER, userName, DEFAULT, WRITE); + OzoneAcl acl10 = new OzoneAcl(USER, userName, ACCESS, WRITE); store.addAcl(volumeObj, acl7); store.addAcl(volumeObj, acl8); store.addAcl(volumeObj, acl9); @@ -2818,12 +2893,13 @@ public void testMultipartUploadWithACL() throws Exception { // Upload part byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 1); - String partName = uploadPart(bucket, keyName2, uploadId, 
1, data); - Map partsMap = new TreeMap<>(); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName2, + uploadId, 1, data); + Map eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, partNameAndETag.getValue()); // Complete multipart upload request - completeMultipartUpload(bucket2, keyName2, uploadId, partsMap); + completeMultipartUpload(bucket2, keyName2, uploadId, eTagsMaps); // User without permission cannot read multi-uploaded object OMException ex = assertThrows(OMException.class, () -> { @@ -2855,7 +2931,86 @@ void testMultipartUploadOverride(ReplicationConfig replication) doMultipartUpload(bucket, keyName, (byte)97, replication); } - + @Test + public void testMultipartUploadOwner() throws Exception { + // Save the old user, and switch to the old user after test + UserGroupInformation oldUser = UserGroupInformation.getCurrentUser(); + try { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName1 = UUID.randomUUID().toString(); + String keyName2 = UUID.randomUUID().toString(); + UserGroupInformation user1 = UserGroupInformation + .createUserForTesting("user1", new String[]{"user1"}); + UserGroupInformation awsUser1 = UserGroupInformation + .createUserForTesting("awsUser1", new String[]{"awsUser1"}); + ReplicationConfig replication = RatisReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.THREE); + + // create volume and bucket and add ACL + store.createVolume(volumeName); + store.getVolume(volumeName).createBucket(bucketName); + OzoneObj volumeObj = OzoneObjInfo.Builder.newBuilder() + .setVolumeName(volumeName).setStoreType(OzoneObj.StoreType.OZONE) + .setResType(OzoneObj.ResourceType.VOLUME).build(); + OzoneObj bucketObj = OzoneObjInfo.Builder.newBuilder() + .setVolumeName(volumeName).setBucketName(bucketName) + .setStoreType(OzoneObj.StoreType.OZONE) + .setResType(OzoneObj.ResourceType.BUCKET).build(); + store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "awsUser1", ACCESS, ACLType.ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "awsUser1", ACCESS, ACLType.ALL)); + + // user1 MultipartUpload a key + UserGroupInformation.setLoginUser(user1); + setOzClient(OzoneClientFactory.getRpcClient(cluster.getConf())); + setStore(ozClient.getObjectStore()); + OzoneBucket bucket = store.getVolume(volumeName).getBucket(bucketName); + doMultipartUpload(bucket, keyName1, (byte) 96, replication); + + assertEquals(user1.getShortUserName(), + bucket.getKey(keyName1).getOwner()); + + // After HDDS-5881 the user will not be different, + // as S3G uses single RpcClient. + // * performing the operation. the real user is an AWS user + // form AWS client. + String strToSign = "AWS4-HMAC-SHA256\n" + + "20150830T123600Z\n" + + "20150830/us-east-1/iam/aws4_request\n" + + "f536975d06c0309214f805bb90ccff089219ecd68b2" + + "577efef23edd43b7e1a59"; + String signature = "5d672d79c15b13162d9279b0855cfba" + + "6789a8edb4c82c400e06b5924a6f2b5d7"; + String secret = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"; + S3Auth s3Auth = new S3Auth(strToSign, signature, + awsUser1.getShortUserName(), awsUser1.getShortUserName()); + // Add secret to S3Secret table. 
+ S3SecretManager s3SecretManager = cluster.getOzoneManager() + .getS3SecretManager(); + s3SecretManager.storeSecret(awsUser1.getShortUserName(), + S3SecretValue.of(awsUser1.getShortUserName(), secret)); + setOzClient(OzoneClientFactory.getRpcClient(cluster.getConf())); + setStore(ozClient.getObjectStore()); + + // set AWS user for RPCClient and OzoneManager + store.getClientProxy().setThreadLocalS3Auth(s3Auth); + OzoneManager.setS3Auth(OzoneManagerProtocolProtos.S3Authentication + .newBuilder().setAccessId(awsUser1.getUserName()).build()); + // awsUser1 create a key + bucket = store.getVolume(volumeName).getBucket(bucketName); + doMultipartUpload(bucket, keyName2, (byte)96, replication); + + assertEquals(awsUser1.getShortUserName(), + bucket.getKey(keyName2).getOwner()); + } finally { + OzoneManager.setS3Auth(null); + UserGroupInformation.setLoginUser(oldUser); + setOzClient(OzoneClientFactory.getRpcClient(cluster.getConf())); + setStore(ozClient.getObjectStore()); + } + } @Test public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { @@ -2873,21 +3028,21 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { anyReplication()); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMaps = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, + uploadID, 1, "data".getBytes(UTF_8)); + eTagsMaps.put(1, partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes(UTF_8)); - partsMap.put(2, partName); + eTagsMaps.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @Test @@ -2934,11 +3089,11 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(1, UUID.randomUUID().toString()); + TreeMap eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, DigestUtils.md5Hex(UUID.randomUUID().toString())); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @@ -2960,11 +3115,11 @@ public void testMultipartUploadWithMissingParts() throws Exception { uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. 
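
The testMultipartUploadOwner flow above hinges on impersonating an S3 access ID: a secret is registered with the OM's S3SecretManager, the client proxy is handed a thread-local S3Auth, and OzoneManager.setS3Auth(...) supplies the matching authentication context, so the committed key's owner becomes the AWS access ID rather than the RPC login user. A stripped-down sketch under those assumptions; the helper name actAsS3User is illustrative, and the signing strings and secret are placeholders like the AWS example values used in the test.

    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.om.OzoneManager;
    import org.apache.hadoop.ozone.om.S3SecretManager;
    import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
    import org.apache.hadoop.ozone.om.protocol.S3Auth;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;

    // Illustrative: make subsequent writes through 'store' appear to come from accessId.
    static void actAsS3User(ObjectStore store, OzoneManager om, String accessId,
        String strToSign, String signature, String secret) throws Exception {
      // Register the secret so the OM can validate requests for this access ID.
      S3SecretManager secrets = om.getS3SecretManager();
      secrets.storeSecret(accessId, S3SecretValue.of(accessId, secret));

      // Thread-local auth on the client side ...
      store.getClientProxy().setThreadLocalS3Auth(
          new S3Auth(strToSign, signature, accessId, accessId));
      // ... and the corresponding S3 authentication context on the OM side.
      OzoneManager.setS3Auth(OzoneManagerProtocolProtos.S3Authentication
          .newBuilder().setAccessId(accessId).build());
    }
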
- TreeMap partsMap = new TreeMap<>(); - partsMap.put(3, "random"); + TreeMap eTagsMap = new TreeMap<>(); + eTagsMap.put(3, DigestUtils.md5Hex("random")); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -2996,6 +3151,60 @@ public void testMultipartPartNumberExceedingAllowedRange() throws Exception { keyName, sampleData.length(), 10001, uploadID)); } + @ParameterizedTest + @MethodSource("replicationConfigs") + public void testMultipartUploadWithCustomMetadata(ReplicationConfig replication) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + // Create custom metadata + Map customMetadata = new HashMap<>(); + customMetadata.put("custom-key1", "custom-value1"); + customMetadata.put("custom-key2", "custom-value2"); + + doMultipartUpload(bucket, keyName, (byte) 98, replication, customMetadata, Collections.emptyMap()); + } + + @ParameterizedTest + @MethodSource({"replicationConfigs"}) + public void testMultipartUploadWithTags(ReplicationConfig replication) throws Exception { + testMultipartUploadWithTags(replication, BucketLayout.OBJECT_STORE); + } + + @ParameterizedTest + @MethodSource({"bucketLayouts"}) + public void testMultipartUploadWithTags(BucketLayout bucketLayout) throws Exception { + testMultipartUploadWithTags(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), bucketLayout); + } + + private void testMultipartUploadWithTags(ReplicationConfig replication, BucketLayout bucketLayout) + throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket bucket = volume.getBucket(bucketName); + + // Create tags + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + + doMultipartUpload(bucket, keyName, (byte) 96, replication, Collections.emptyMap(), tags); + } + @Test public void testAbortUploadFail() throws Exception { String volumeName = UUID.randomUUID().toString(); @@ -3063,6 +3272,9 @@ void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -3071,10 +3283,13 @@ void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
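// --- Editorial sketch, not part of the patch ------------------------------------
// The new custom-metadata and tag tests above use the initiateMultipartUpload
// overload that accepts both maps up front. Inside a test body (bucket, keyName
// and replication assumed from the surrounding fixtures):
Map<String, String> customMetadata = new HashMap<>();
customMetadata.put("custom-key1", "custom-value1");
Map<String, String> tags = new HashMap<>();
tags.put("tag-key1", "tag-value1");
OmMultipartInfo info =
    bucket.initiateMultipartUpload(keyName, replication, customMetadata, tags);
String uploadID = info.getUploadID();
// Parts are then uploaded and completed as usual; doMultipartUpload later asserts
// that the committed key carries the same metadata entries and tags.
// ---------------------------------------------------------------------------------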
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -3144,17 +3359,17 @@ void testListMultipartUploadParts(ReplicationConfig replication) Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -3195,17 +3410,17 @@ void testListMultipartUploadPartsWithContinuation( Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -3473,11 +3688,7 @@ public void testNativeAclsForPrefix() throws Exception { .setStoreType(OzoneObj.StoreType.OZONE) .build(); - // add acl - BitSet aclRights1 = new BitSet(); - aclRights1.set(READ.ordinal()); - OzoneAcl user1Acl = new OzoneAcl(USER, - "user1", aclRights1, ACCESS); + OzoneAcl user1Acl = new OzoneAcl(USER, "user1", ACCESS, READ); assertTrue(store.addAcl(prefixObj, user1Acl)); // get acl @@ 
-3490,11 +3701,7 @@ public void testNativeAclsForPrefix() throws Exception { aclsGet = store.getAcl(prefixObj); assertEquals(0, aclsGet.size()); - // set acl - BitSet aclRights2 = new BitSet(); - aclRights2.set(ACLType.ALL.ordinal()); - OzoneAcl group1Acl = new OzoneAcl(GROUP, - "group1", aclRights2, ACCESS); + OzoneAcl group1Acl = new OzoneAcl(GROUP, "group1", ACCESS, ACLType.ALL); List acls = new ArrayList<>(); acls.add(user1Acl); acls.add(group1Acl); @@ -3534,12 +3741,11 @@ private List getAclList(OzoneConfiguration conf) ACLType userRights = aclConfig.getUserDefaultRights(); ACLType groupRights = aclConfig.getGroupDefaultRights(); - listOfAcls.add(new OzoneAcl(USER, - ugi.getUserName(), userRights, ACCESS)); + listOfAcls.add(new OzoneAcl(USER, ugi.getUserName(), ACCESS, userRights)); //Group ACLs of the User List userGroups = Arrays.asList(ugi.getGroupNames()); userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, groupRights, ACCESS))); + new OzoneAcl(GROUP, group, ACCESS, groupRights))); return listOfAcls; } @@ -3555,7 +3761,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { if (expectedAcls.size() > 0) { OzoneAcl oldAcl = expectedAcls.get(0); OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, oldAcl.getAclScope()); + oldAcl.getAclScope(), ACLType.READ_ACL); // Verify that operation successful. assertTrue(store.addAcl(ozObj, newAcl)); @@ -3606,9 +3812,9 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { // Reset acl's. OzoneAcl ua = new OzoneAcl(USER, "userx", - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); OzoneAcl ug = new OzoneAcl(GROUP, "userx", - ACLType.ALL, ACCESS); + ACCESS, ACLType.ALL); store.setAcl(ozObj, Arrays.asList(ua, ug)); newAcls = store.getAcl(ozObj); assertEquals(2, newAcls.size()); @@ -3632,8 +3838,14 @@ private byte[] generateData(int size, byte val) { private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, ReplicationConfig replication) throws Exception { + doMultipartUpload(bucket, keyName, val, replication, Collections.emptyMap(), Collections.emptyMap()); + } + + private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, + ReplicationConfig replication, Map customMetadata, Map tags) + throws Exception { // Initiate Multipart upload request - String uploadID = initiateMultipartUpload(bucket, keyName, replication); + String uploadID = initiateMultipartUpload(bucket, keyName, replication, customMetadata, tags); // Upload parts Map partsMap = new TreeMap<>(); @@ -3642,19 +3854,20 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, // than 5mb int length = 0; byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val); - String partName = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName); + Pair partNameAndEtag = uploadPart(bucket, keyName, uploadID, + 1, data); + partsMap.put(1, partNameAndEtag.getValue()); length += data.length; - partName = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName); + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 2, data); + partsMap.put(2, partNameAndEtag.getValue()); length += data.length; String part3 = UUID.randomUUID().toString(); - partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( UTF_8)); - partsMap.put(3, partName); + partsMap.put(3, partNameAndEtag.getValue()); length += 
part3.getBytes(UTF_8).length; // Complete multipart upload request @@ -3699,32 +3912,55 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, latestVersionLocations.getBlocksLatestVersionOnly() .forEach(omKeyLocationInfo -> assertNotEquals(-1, omKeyLocationInfo.getPartNumber())); + + Map keyMetadata = omKeyInfo.getMetadata(); + assertNotNull(keyMetadata.get(ETAG)); + if (customMetadata != null && !customMetadata.isEmpty()) { + assertThat(keyMetadata).containsAllEntriesOf(customMetadata); + } + + Map keyTags = omKeyInfo.getTags(); + if (keyTags != null && !keyTags.isEmpty()) { + assertThat(keyTags).containsAllEntriesOf(tags); + } } private String initiateMultipartUpload(OzoneBucket bucket, String keyName, ReplicationConfig replicationConfig) throws Exception { + return initiateMultipartUpload(bucket, keyName, replicationConfig, Collections.emptyMap(), Collections.emptyMap()); + } + + private String initiateMultipartUpload(OzoneBucket bucket, String keyName, + ReplicationConfig replicationConfig, Map customMetadata, + Map tags) throws Exception { OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - replicationConfig); + replicationConfig, customMetadata, tags); String uploadID = multipartInfo.getUploadID(); assertNotNull(uploadID); return uploadID; } - private String uploadPart(OzoneBucket bucket, String keyName, String - uploadID, int partNumber, byte[] data) throws Exception { + private Pair uploadPart(OzoneBucket bucket, String keyName, + String uploadID, int partNumber, + byte[] data) throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } @@ -3929,7 +4165,7 @@ public void testSetS3VolumeAcl() throws Exception { .setStoreType(OzoneObj.StoreType.OZONE) .build(); - OzoneAcl ozoneAcl = new OzoneAcl(USER, remoteUserName, WRITE, DEFAULT); + OzoneAcl ozoneAcl = new OzoneAcl(USER, remoteUserName, DEFAULT, WRITE); boolean result = store.addAcl(s3vVolume, ozoneAcl); @@ -4052,7 +4288,6 @@ private void checkExceptedResultForVersioningTest(String volumeName, } @Test - @Unhealthy("HDDS-8752") public void testOverWriteKeyWithAndWithOutVersioning() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java index e373b06d950c..8f3c82620402 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java @@ -83,10 +83,10 @@ public class 
TestOzoneRpcClientForAclAuditLog { private static UserGroupInformation ugi; private static final OzoneAcl USER_ACL = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, - "johndoe", IAccessAuthorizer.ACLType.ALL, ACCESS); + "johndoe", ACCESS, IAccessAuthorizer.ACLType.ALL); private static final OzoneAcl USER_ACL_2 = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, - "jane", IAccessAuthorizer.ACLType.ALL, ACCESS); + "jane", ACCESS, IAccessAuthorizer.ACLType.ALL); private static List aclListToAdd = new ArrayList<>(); private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index ffd80f359ff6..c4a452e1683e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -24,12 +24,16 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.HashMap; +import java.util.Locale; import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; @@ -37,6 +41,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -52,10 +57,13 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerStateMachine; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; @@ -165,11 +173,13 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException { } } - @Test - public void testMultiPartUploadWithStream() throws IOException { + @ParameterizedTest + @MethodSource("replicationConfigs") + void testMultiPartUploadWithStream(ReplicationConfig replicationConfig) + throws IOException, NoSuchAlgorithmException { String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); + String bucketName = replicationConfig.getReplicationType().name().toLowerCase(Locale.ROOT) + "-bucket"; + String keyName = replicationConfig.getReplication(); byte[] sampleData = new byte[1024 * 8]; @@ -180,11 +190,6 @@ public void 
testMultiPartUploadWithStream() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - ReplicationConfig replicationConfig = - ReplicationConfig.fromTypeAndFactor( - ReplicationType.RATIS, - THREE); - OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, replicationConfig); @@ -196,12 +201,15 @@ public void testMultiPartUploadWithStream() throws IOException { keyName, valueLength, 1, uploadID); ozoneStreamOutput.write(ByteBuffer.wrap(sampleData), 0, valueLength); + ozoneStreamOutput.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) + .digest(sampleData)).toLowerCase()); ozoneStreamOutput.close(); OzoneMultipartUploadPartListParts parts = bucket.listParts(keyName, uploadID, 0, 1); - assertEquals(parts.getPartInfoList().size(), 1); + assertEquals(1, parts.getPartInfoList().size()); OzoneMultipartUploadPartListParts.PartInfo partInfo = parts.getPartInfoList().get(0); @@ -315,7 +323,11 @@ public void testParallelDeleteBucketAndCreateKey() throws IOException, omSM.getHandler().setInjector(injector); thread1.start(); thread2.start(); - Thread.sleep(2000); + // Wait long enough for createKey's preExecute to finish executing + GenericTestUtils.waitFor(() -> { + return getCluster().getOzoneManager().getOmServerProtocol().getLastRequestToSubmit().getCmdType().equals( + Type.CreateKey); + }, 100, 10000); injector.resume(); try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index 15af5a2d8e06..323859be4312 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -81,9 +81,7 @@ import java.io.File; import java.io.IOException; import java.time.Instant; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; @@ -93,6 +91,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.TestDataUtil.cleanupDeletedTable; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; @@ -299,7 +298,7 @@ public void testFileRecovery(boolean forceRecovery) throws Exception { @ParameterizedTest @ValueSource(ints = {1 << 24, (1 << 24) + 1, (1 << 24) - 1}) public void testPreallocateFileRecovery(long dataSize) throws Exception { - cleanupDeletedTable(); + cleanupDeletedTable(ozoneManager); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -370,25 +369,6 @@ public void testPreallocateFileRecovery(long dataSize) throws Exception { } } - private void cleanupDeletedTable() throws IOException { - Table deletedTable = ozoneManager.getMetadataManager().getDeletedTable(); - List nameList = new ArrayList<>(); - try (TableIterator> - keyIter = deletedTable.iterator()) { - while (keyIter.hasNext()) { - Table.KeyValue kv = 
keyIter.next(); - nameList.add(kv.getKey()); - } - } - nameList.forEach(k -> { - try { - deletedTable.delete(k); - } catch (IOException e) { - // do nothing - } - }); - } - private void assertTokenIsNull(OmKeyInfo value) { value.getKeyLocationVersions() .forEach( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index a8029987fedd..1e22613f929b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -57,6 +57,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -100,6 +101,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10, TimeUnit.SECONDS); @@ -125,8 +127,7 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2). 
- setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index f8e9b552e3ea..6b64046cc192 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -263,38 +263,41 @@ public void testWatchForCommitForRetryfailure() throws Exception { HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE); XceiverClientSpi xceiverClient = clientManager .acquireClient(container1.getPipeline()); - assertEquals(1, xceiverClient.getRefcount()); - assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); - Pipeline pipeline = xceiverClient.getPipeline(); - TestHelper.createPipelineOnDatanode(pipeline, cluster); - XceiverClientReply reply = xceiverClient.sendCommandAsync( - ContainerTestHelper.getCreateContainerRequest( - container1.getContainerInfo().getContainerID(), - xceiverClient.getPipeline())); - reply.getResponse().get(); - long index = reply.getLogIndex(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); - // emulate closing pipeline when SCM detects DEAD datanodes - cluster.getStorageContainerManager() - .getPipelineManager().closePipeline(pipeline, false); - // again write data with more than max buffer limit. This wi - // just watch for a log index which in not updated in the commitInfo Map - // as well as there is no logIndex generate in Ratis. - // The basic idea here is just to test if its throws an exception. - ExecutionException e = assertThrows(ExecutionException.class, - () -> xceiverClient.watchForCommit(index + RandomUtils.nextInt(0, 100) + 10)); - // since the timeout value is quite long, the watch request will either - // fail with NotReplicated exceptio, RetryFailureException or - // RuntimeException - assertFalse(HddsClientUtils - .checkForException(e) instanceof TimeoutException); - // client should not attempt to watch with - // MAJORITY_COMMITTED replication level, except the grpc IO issue - if (!logCapturer.getOutput().contains("Connection refused")) { - assertThat(e.getMessage()).doesNotContain("Watch-MAJORITY_COMMITTED"); + try { + assertEquals(1, xceiverClient.getRefcount()); + assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); + Pipeline pipeline = xceiverClient.getPipeline(); + TestHelper.createPipelineOnDatanode(pipeline, cluster); + XceiverClientReply reply = xceiverClient.sendCommandAsync( + ContainerTestHelper.getCreateContainerRequest( + container1.getContainerInfo().getContainerID(), + xceiverClient.getPipeline())); + reply.getResponse().get(); + long index = reply.getLogIndex(); + cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); + cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); + // emulate closing pipeline when SCM detects DEAD datanodes + cluster.getStorageContainerManager() + .getPipelineManager().closePipeline(pipeline, false); + // again write data with more than max buffer limit. This wi + // just watch for a log index which in not updated in the commitInfo Map + // as well as there is no logIndex generate in Ratis. 
+ // The basic idea here is just to test if its throws an exception. + ExecutionException e = assertThrows(ExecutionException.class, + () -> xceiverClient.watchForCommit(index + RandomUtils.nextInt(0, 100) + 10)); + // since the timeout value is quite long, the watch request will either + // fail with NotReplicated exceptio, RetryFailureException or + // RuntimeException + assertFalse(HddsClientUtils + .checkForException(e) instanceof TimeoutException); + // client should not attempt to watch with + // MAJORITY_COMMITTED replication level, except the grpc IO issue + if (!logCapturer.getOutput().contains("Connection refused")) { + assertThat(e.getMessage()).doesNotContain("Watch-MAJORITY_COMMITTED"); + } + } finally { + clientManager.releaseClient(xceiverClient, false); } - clientManager.releaseClient(xceiverClient, false); } } @@ -309,35 +312,38 @@ public void test2WayCommitForTimeoutException() throws Exception { HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE); XceiverClientSpi xceiverClient = clientManager .acquireClient(container1.getPipeline()); - assertEquals(1, xceiverClient.getRefcount()); - assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); - Pipeline pipeline = xceiverClient.getPipeline(); - TestHelper.createPipelineOnDatanode(pipeline, cluster); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; - XceiverClientReply reply = xceiverClient.sendCommandAsync( - ContainerTestHelper.getCreateContainerRequest( - container1.getContainerInfo().getContainerID(), - xceiverClient.getPipeline())); - reply.getResponse().get(); - assertEquals(3, ratisClient.getCommitInfoMap().size()); - List nodesInPipeline = pipeline.getNodes(); - for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { - // shutdown the ratis follower - if (nodesInPipeline.contains(dn.getDatanodeDetails()) - && RatisTestHelper.isRatisFollower(dn, pipeline)) { - cluster.shutdownHddsDatanode(dn.getDatanodeDetails()); - break; + try { + assertEquals(1, xceiverClient.getRefcount()); + assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); + Pipeline pipeline = xceiverClient.getPipeline(); + TestHelper.createPipelineOnDatanode(pipeline, cluster); + XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; + XceiverClientReply reply = xceiverClient.sendCommandAsync( + ContainerTestHelper.getCreateContainerRequest( + container1.getContainerInfo().getContainerID(), + xceiverClient.getPipeline())); + reply.getResponse().get(); + assertEquals(3, ratisClient.getCommitInfoMap().size()); + List nodesInPipeline = pipeline.getNodes(); + for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { + // shutdown the ratis follower + if (nodesInPipeline.contains(dn.getDatanodeDetails()) + && RatisTestHelper.isRatisFollower(dn, pipeline)) { + cluster.shutdownHddsDatanode(dn.getDatanodeDetails()); + break; + } } - } - reply = xceiverClient.sendCommandAsync(ContainerTestHelper - .getCloseContainer(pipeline, - container1.getContainerInfo().getContainerID())); - reply.getResponse().get(); - xceiverClient.watchForCommit(reply.getLogIndex()); + reply = xceiverClient.sendCommandAsync(ContainerTestHelper + .getCloseContainer(pipeline, + container1.getContainerInfo().getContainerID())); + reply.getResponse().get(); + xceiverClient.watchForCommit(reply.getLogIndex()); - // commitInfo Map will be reduced to 2 here - assertEquals(2, ratisClient.getCommitInfoMap().size()); - clientManager.releaseClient(xceiverClient, false); + // commitInfo Map will be reduced to 2 here + 
assertEquals(2, ratisClient.getCommitInfoMap().size()); + } finally { + clientManager.releaseClient(xceiverClient, false); + } String output = logCapturer.getOutput(); assertThat(output).contains("3 way commit failed"); assertThat(output).contains("TimeoutException"); @@ -354,27 +360,30 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE); XceiverClientSpi xceiverClient = clientManager .acquireClient(container1.getPipeline()); - assertEquals(1, xceiverClient.getRefcount()); - assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); - Pipeline pipeline = xceiverClient.getPipeline(); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; - long containerId = container1.getContainerInfo().getContainerID(); - XceiverClientReply reply = xceiverClient.sendCommandAsync( - ContainerTestHelper.getCreateContainerRequest(containerId, - xceiverClient.getPipeline())); - reply.getResponse().get(); - assertEquals(3, ratisClient.getCommitInfoMap().size()); - List pipelineList = new ArrayList<>(); - pipelineList.add(pipeline); - TestHelper.waitForPipelineClose(pipelineList, cluster); - // just watch for a log index which in not updated in the commitInfo Map - // as well as there is no logIndex generate in Ratis. - // The basic idea here is just to test if its throws an exception. - Exception e = - assertThrows(Exception.class, - () -> xceiverClient.watchForCommit(reply.getLogIndex() + RandomUtils.nextInt(0, 100) + 10)); - assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e)); - clientManager.releaseClient(xceiverClient, false); + try { + assertEquals(1, xceiverClient.getRefcount()); + assertEquals(container1.getPipeline(), xceiverClient.getPipeline()); + Pipeline pipeline = xceiverClient.getPipeline(); + XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; + long containerId = container1.getContainerInfo().getContainerID(); + XceiverClientReply reply = xceiverClient.sendCommandAsync( + ContainerTestHelper.getCreateContainerRequest(containerId, + xceiverClient.getPipeline())); + reply.getResponse().get(); + assertEquals(3, ratisClient.getCommitInfoMap().size()); + List pipelineList = new ArrayList<>(); + pipelineList.add(pipeline); + TestHelper.waitForPipelineClose(pipelineList, cluster); + // just watch for a log index which in not updated in the commitInfo Map + // as well as there is no logIndex generate in Ratis. + // The basic idea here is just to test if its throws an exception. 
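// --- Editorial sketch, not part of the patch ------------------------------------
// The three watch-for-commit tests reworked in this file share one shape: everything
// after acquireClient moves into try, and releaseClient moves into finally so the
// reference is dropped even when an assertion fails. Schematically:
XceiverClientSpi xceiverClient = clientManager.acquireClient(container1.getPipeline());
try {
  // ... send commands, shut down datanodes, assert on watchForCommit behaviour ...
} finally {
  clientManager.releaseClient(xceiverClient, false); // always release the acquired client
}
// ---------------------------------------------------------------------------------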
+ Exception e = + assertThrows(Exception.class, + () -> xceiverClient.watchForCommit(reply.getLogIndex() + RandomUtils.nextInt(0, 100) + 10)); + assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e)); + } finally { + clientManager.releaseClient(xceiverClient, false); + } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java index 9f5d04c56f94..256148dfb8de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java @@ -53,6 +53,7 @@ protected static MiniOzoneCluster newCluster( conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB); @@ -73,7 +74,6 @@ protected static MiniOzoneCluster newCluster( return MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(5) .build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java index e045b48bda96..24064ae5c883 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java @@ -140,6 +140,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -148,8 +149,8 @@ public static void init() throws Exception { .setStreamBufferMaxSize(maxFlushSize) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) - .setTotalPipelineNumLimit(10) + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java index d6bb591979f6..2a33ddc5677f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java @@ -442,6 +442,6 @@ public static int countReplicas(long containerID, MiniOzoneCluster cluster) { public static void waitForReplicaCount(long containerID, int count, MiniOzoneCluster cluster) throws TimeoutException, InterruptedException { GenericTestUtils.waitFor(() -> countReplicas(containerID, cluster) == count, - 1000, 30000); + 200, 30000); } } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index d5564ac2315e..5ff8d713649e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -180,7 +180,6 @@ public void init() throws Exception { conf.setFromObject(replicationConf); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(50) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index cd25ee25c8f4..8d22eddadc59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -85,10 +85,10 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, "1"); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) - .setTotalPipelineNumLimit(15) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index 8c35d5011a5d..0fd31bb4b728 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -183,10 +183,10 @@ static void runContainerStateMachineMetrics( static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index d4900bb48783..a4a5701f5491 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.metrics; import java.io.File; +import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.UUID; @@ -59,12 +60,15 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; /** * Test for metrics published by storage containers. */ @Timeout(300) public class TestContainerMetrics { + @TempDir + private Path tempDir; @Test public void testContainerMetrics() throws Exception { @@ -78,7 +82,7 @@ public void testContainerMetrics() throws Exception { Pipeline pipeline = MockPipeline .createSingleNodePipeline(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, @@ -105,6 +109,8 @@ public void testContainerMetrics() throws Exception { } HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); dispatcher.setClusterId(UUID.randomUUID().toString()); server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java index a1d436b3360a..2f18326f7b1b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java @@ -67,13 +67,13 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(1) - .build(); + .setNumDatanodes(1); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 7a64ddc5d5e7..1c5da04c0a3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -30,11 +30,13 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import 
org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.nio.file.Path; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -55,6 +57,8 @@ */ @Timeout(300) public class TestOzoneContainer { + @TempDir + private Path tempDir; @Test public void testCreateOzoneContainer( @@ -68,13 +72,15 @@ public void testCreateOzoneContainer( Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); DatanodeDetails datanodeDetails = randomDatanodeDetails(); container = ContainerTestUtils .getOzoneContainer(datanodeDetails, conf); + StorageVolumeUtil.getHddsVolumesList(container.getVolumeSet().getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); //Set clusterId and manually start ozone container. container.start(UUID.randomUUID().toString()); @@ -99,7 +105,7 @@ void testOzoneContainerStart( Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java index 73910ef00ff1..a1e8e1781f51 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java @@ -38,6 +38,8 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -48,6 +50,9 @@ import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.event.Level; import java.io.IOException; import java.nio.file.Path; @@ -79,6 +84,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getTestContainerID; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.apache.ozone.test.GenericTestUtils.LogCapturer.captureLogs; +import static org.apache.ozone.test.GenericTestUtils.setLogLevel; import static 
org.apache.ozone.test.GenericTestUtils.waitFor; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -138,6 +144,9 @@ public void setup() throws Exception { dn = aDatanode(); pipeline = createPipeline(singletonList(dn)); + + Logger logger = LoggerFactory.getLogger(ClientTrustManager.class); + setLogLevel(logger, Level.DEBUG); } @Test @@ -303,6 +312,9 @@ private OzoneContainer createAndStartOzoneContainerInstance() { StateContext stateContext = ContainerTestUtils.getMockContext(dn, conf); container = new OzoneContainer( dn, conf, stateContext, caClient, keyClient); + MutableVolumeSet volumeSet = container.getVolumeSet(); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); container.start(clusterID); } catch (Throwable e) { if (container != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index 715b0678a173..5585696dfc31 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -38,6 +38,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils; @@ -129,14 +131,17 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, try { Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.toString()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE) .getValue()); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); container = new OzoneContainer(dn, conf, ContainerTestUtils .getMockContext(dn, conf), caClient, secretKeyClient); + MutableVolumeSet volumeSet = container.getVolumeSet(); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); //Set scmId and manually start ozone container. 
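// --- Editorial sketch, not part of the patch ------------------------------------
// Several container tests in this patch now point each HddsVolume's container DB
// at a JUnit @TempDir before the container is started; the recurring pattern is:
//   @TempDir
//   private Path tempDir;
StorageVolumeUtil.getHddsVolumesList(container.getVolumeSet().getVolumesList())
    .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
// Only after the per-volume DB location is set is container.start(...) invoked,
// as in the line that follows.
// ---------------------------------------------------------------------------------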
container.start(UUID.randomUUID().toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 3c89bb12ee7a..630c4d314959 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; @@ -57,6 +58,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -69,6 +71,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; @@ -84,12 +87,14 @@ public class TestContainerServer { .getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClient caClient; + @TempDir + private Path tempDir; @BeforeAll public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); - CONF.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); + CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); caClient = new DNCertificateClient(new SecurityConfig(CONF), null, dn, null, null, null); @@ -104,7 +109,7 @@ public static void tearDown() throws Exception { public void testClientServer() throws Exception { DatanodeDetails datanodeDetails = randomDatanodeDetails(); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, @@ -121,10 +126,10 @@ public void testClientServerRatisGrpc() throws Exception { static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, @@ -182,7 +187,7 @@ static void runTestClientServer( } } - private 
static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); @@ -192,6 +197,8 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); @@ -216,7 +223,7 @@ public void testClientServerWithContainerDispatcher() throws Exception { HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 53420c0e2209..8044685bb747 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.EnumSet; @@ -65,6 +66,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -103,6 +105,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; import static org.assertj.core.api.Assertions.assertThat; @@ -115,6 +118,8 @@ * Test Container servers when security is enabled. 
*/ public class TestSecureContainerServer { + @TempDir + private Path tempDir; private static final String TEST_DIR = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -158,7 +163,7 @@ public void testClientServer() throws Exception { HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, @@ -166,7 +171,7 @@ public void testClientServer() throws Exception { hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); } - private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, @@ -175,6 +180,8 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); @@ -199,16 +206,16 @@ public void testClientServerRatisGrpc() throws Exception { runTestClientServerRatis(GRPC, 3); } - static XceiverServerRatis newXceiverServerRatis( + XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, @@ -216,12 +223,12 @@ static XceiverServerRatis newXceiverServerRatis( caClient, null); } - private static void runTestClientServerRatis(RpcType rpc, int numNodes) + private void runTestClientServerRatis(RpcType rpc, int numNodes) throws Exception { runTestClientServer(numNodes, (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf), XceiverClientRatis::newXceiverClientRatis, - TestSecureContainerServer::newXceiverServerRatis, + this::newXceiverServerRatis, (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p), (p) -> { }); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index ec7eb81db33d..e94f46a398b3 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -22,6 +22,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -61,6 +62,7 @@ import java.util.stream.Stream; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -295,8 +297,7 @@ private void prepareTable(String tableName, boolean schemaV3) for (int i = 1; i <= 5; i++) { String key = "key" + i; OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", - key, HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.ONE); + key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, HddsProtos.ReplicationFactor.ONE)).build(); keyTable.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index cca47e17e407..7c82633f1136 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -35,6 +35,7 @@ import org.junit.jupiter.api.Timeout; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.assertj.core.api.Assertions.assertThat; /** @@ -50,8 +51,9 @@ public class TestDnRatisLogParser { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 2); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).setTotalPipelineNumLimit(2).build(); + .setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); System.setOut(new PrintStream(out, false, UTF_8.name())); System.setErr(new PrintStream(err, false, UTF_8.name())); @@ -71,7 +73,7 @@ public void destroy() throws Exception { public void testRatisLogParsing() throws Exception { OzoneConfiguration conf = cluster.getHddsDatanodes().get(0).getConf(); String path = - conf.get(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + conf.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); UUID pid = cluster.getStorageContainerManager().getPipelineManager() .getPipelines().get(0).getId().getId(); File pipelineDir = new File(path, pid.toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java index 3e22c1db90de..8d77b6cc58b5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java @@ -274,7 +274,6 @@ private static MiniOzoneCluster newCluster(boolean schemaV3) ozoneConfig.setFromObject(dnConf); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConfig) .setNumDatanodes(1) - .setNumDataVolumes(1) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(ReplicationFactor.ONE, 30000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java index 0273deb50e61..98ab87b871de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; @@ -90,7 +91,9 @@ public void init() throws Exception { ozoneConfig.setFromObject(dnConf); cluster = MiniOzoneCluster.newBuilder(ozoneConfig) .setNumDatanodes(1) - .setNumDataVolumes(3) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(3) + .build()) .build(); cluster.waitForClusterToBeReady(); datanodes = cluster.getHddsDatanodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java new file mode 100644 index 000000000000..f209783c7453 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.freon; + +import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import picocli.CommandLine; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Tests Freon, with MiniOzoneCluster and validate data. + */ +public class TestDNRPCLoadGenerator { + + private static MiniOzoneCluster cluster = null; + private static ContainerWithPipeline container; + + private static void startCluster(OzoneConfiguration conf) throws Exception { + DatanodeRatisServerConfig ratisServerConfig = + conf.getObject(DatanodeRatisServerConfig.class); + ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); + ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); + conf.setFromObject(ratisServerConfig); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5).build(); + cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, + 180000); + + StorageContainerLocationProtocolClientSideTranslatorPB + storageContainerLocationClient = cluster + .getStorageContainerLocationClient(); + container = + storageContainerLocationClient.allocateContainer( + SCMTestUtils.getReplicationType(conf), + HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE); + XceiverClientManager xceiverClientManager = new XceiverClientManager(conf); + XceiverClientSpi client = xceiverClientManager + .acquireClient(container.getPipeline()); + ContainerProtocolCalls.createContainer(client, + container.getContainerInfo().getContainerID(), null); + } + + static void shutdownCluster() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + startCluster(conf); + } + + @AfterAll + public static void shutdown() { + shutdownCluster(); + } + + private 
static Stream provideParameters() { + return Stream.of( + Arguments.of(true, true), + Arguments.of(true, false), + Arguments.of(false, true), + Arguments.of(false, false) + ); + } + + @ParameterizedTest + @MethodSource("provideParameters") + public void test(boolean readOnly, boolean ratis) { + DNRPCLoadGenerator randomKeyGenerator = + new DNRPCLoadGenerator(cluster.getConf()); + CommandLine cmd = new CommandLine(randomKeyGenerator); + List cmdArgs = new ArrayList<>(Arrays.asList( + "--container-id", Long.toString(container.getContainerInfo().getContainerID()), + "--clients", "5", + "-t", "10")); + + if (readOnly) { + cmdArgs.add("--read-only"); + } + if (ratis) { + cmdArgs.add("--ratis"); + } + + int exitCode = cmd.execute(cmdArgs.toArray(new String[0])); + assertEquals(0, exitCode); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index 28cc863c26d5..e1f2061c7d46 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.jupiter.api.Test; import picocli.CommandLine; @@ -52,8 +53,10 @@ static void startCluster(OzoneConfiguration conf) throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); conf.setFromObject(raftClientConfig); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 8); + cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5).setTotalPipelineNumLimit(8).build(); + .setNumDatanodes(5).build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, 180000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java index 0798731a839d..862b52c8e9e1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java @@ -34,6 +34,7 @@ import picocli.CommandLine; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -54,9 +55,8 @@ public class TestFreonWithDatanodeFastRestart { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) - .setHbProcessorInterval(1000) - .setHbInterval(1000) .setNumDatanodes(3) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java index d78beff7e78b..08c1b3bd3b35 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java @@ -61,6 +61,8 @@ public static void init() throws Exception { 1, TimeUnit.SECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1, TimeUnit.SECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 8); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -74,10 +76,7 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = MiniOzoneCluster.newBuilder(conf) - .setHbProcessorInterval(1000) - .setHbInterval(1000) .setNumDatanodes(3) - .setTotalPipelineNumLimit(8) .build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestMultiSyncer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestMultiSyncer.java new file mode 100644 index 000000000000..907eab84bf60 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestMultiSyncer.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.freon; + +import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import picocli.CommandLine; + +import java.io.IOException; +import java.time.Duration; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Tests Freon, with MiniOzoneCluster and validate data. + */ +public class TestMultiSyncer { + private static MiniOzoneCluster cluster = null; + + private static void startCluster(OzoneConfiguration conf) throws Exception { + DatanodeRatisServerConfig ratisServerConfig = + conf.getObject(DatanodeRatisServerConfig.class); + ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); + ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); + conf.setFromObject(ratisServerConfig); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + conf.set(OZONE_FS_HSYNC_ENABLED, "true"); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5).build(); + cluster.waitForClusterToBeReady(); + } + + static void shutdownCluster() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + startCluster(conf); + } + + @AfterAll + public static void shutdown() { + shutdownCluster(); + } + + @Test + public void test() throws IOException { + MultiSyncer randomKeyGenerator = + new MultiSyncer(cluster.getConf()); + CommandLine cmd = new CommandLine(randomKeyGenerator); + + String volumeName = "vol1"; + String bucketName = "bucket1"; + try (OzoneClient client = cluster.newClient()) { + ObjectStore store = client.getObjectStore(); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + + //String rootPath = "o3fs://" + bucketName + "." 
+ volumeName; + String rootPath = String.format("%s://%s/%s/%s/", + OZONE_OFS_URI_SCHEME, cluster.getConf().get(OZONE_OM_ADDRESS_KEY), + volumeName, bucketName); + + // open 10 files to write (-n) + // 5 parallel writer threads (-t) + // each writer is associated with a thread pool of 5 syncers (--syncer-per-writer) + // each writer writes 1kb into the file (--bytes-per-write) + // each file is 1MB (--file-length) + int exitCode = cmd.execute( + "--path", rootPath, + "--size", "1048576", + "--bytes-per-write", "1024", + "--syncer-per-writer", "5", + "-t", "5", + "-n", "10"); + assertEquals(0, exitCode); + } + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index bca21aebd1ac..c566cae414fd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -39,7 +38,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ozone.test.GenericTestUtils; @@ -215,20 +213,16 @@ public void testDAGReconstruction() OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); RDBStore rdbStore = (RDBStore) omMetadataManager.getStore(); RocksDBCheckpointDiffer differ = rdbStore.getRocksDBCheckpointDiffer(); - ReferenceCounted - snapDB1 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap1")); - ReferenceCounted - snapDB2 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap2")); + ReferenceCounted snapDB1 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap1"); + ReferenceCounted snapDB2 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap2"); DifferSnapshotInfo snap1 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap1", - ((RDBStore)((OmSnapshot)snapDB1.get()) + ((RDBStore) snapDB1.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); DifferSnapshotInfo snap2 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap2", ((RDBStore)((OmSnapshot)snapDB2.get()) + volumeName, bucketName, "snap2", ((RDBStore) snapDB2.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); // RocksDB does checkpointing in a separate thread, wait for it @@ -247,13 +241,11 @@ public void testDAGReconstruction() resp = store.createSnapshot(volumeName, bucketName, "snap3"); LOG.debug("Snapshot created: {}", resp); - ReferenceCounted - snapDB3 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, 
"snap3")); + ReferenceCounted snapDB3 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap3"); DifferSnapshotInfo snap3 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap3", - ((RDBStore)((OmSnapshot)snapDB3.get()) + ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); final File checkpointSnap3 = new File(snap3.getDbPath()); GenericTestUtils.waitFor(checkpointSnap3::exists, 2000, 20000); @@ -274,24 +266,21 @@ public void testDAGReconstruction() ozoneManager = cluster.getOzoneManager(); omMetadataManager = ozoneManager.getMetadataManager(); snapDB1 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap1")); + .getActiveSnapshot(volumeName, bucketName, "snap1"); snapDB2 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap2")); + .getActiveSnapshot(volumeName, bucketName, "snap2"); snap1 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap1", - ((RDBStore)((OmSnapshot)snapDB1.get()) + ((RDBStore) snapDB1.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); snap2 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap2", ((RDBStore)((OmSnapshot)snapDB2.get()) + volumeName, bucketName, "snap2", ((RDBStore) snapDB2.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); snapDB3 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap3")); + .getActiveSnapshot(volumeName, bucketName, "snap3"); snap3 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap3", - ((RDBStore)((OmSnapshot)snapDB3.get()) + ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1); assertEquals(sstDiffList21, sstDiffList21Run2); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java index b74022b83e5d..3c7a04071b3c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java @@ -34,7 +34,8 @@ import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -88,8 +89,9 @@ private void shutdown() { * * @throws IOException */ - private void startCluster() throws Exception { + private void startCluster(boolean fsPathsEnabled) throws Exception { conf = getOzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, fsPathsEnabled); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.OBJECT_STORE.name()); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); @@ -104,10 +106,11 @@ private OzoneConfiguration getOzoneConfiguration() { return new OzoneConfiguration(); } - @Test - public void testOmBucketReadWriteKeyOps() throws Exception { + @ParameterizedTest(name = 
"Filesystem Paths Enabled: {0}") + @ValueSource(booleans = {false, true}) + public void testOmBucketReadWriteKeyOps(boolean fsPathsEnabled) throws Exception { try { - startCluster(); + startCluster(fsPathsEnabled); FileOutputStream out = FileUtils.openOutputStream(new File(path, "conf")); cluster.getConf().writeXml(out); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java index 9d4d489586b0..463f44bf058c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java @@ -47,7 +47,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.grpc.server.GrpcLogAppender; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; @@ -95,7 +94,7 @@ public class TestAddRemoveOzoneManager { private void setupCluster(int numInitialOMs) throws Exception { conf = new OzoneConfiguration(); conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 5); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(SCM_DUMMY_SERVICE_ID) .setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(numInitialOMs) @@ -184,7 +183,6 @@ private List testBootstrapOMs(int numNewOMs) throws Exception { * OM. */ @Test - @Flaky("HDDS-7880") public void testBootstrap() throws Exception { setupCluster(1); OzoneManager oldOM = cluster.getOzoneManager(); @@ -303,7 +301,6 @@ public void testForceBootstrap() throws Exception { config.setInt(OMConfigKeys.OZONE_OM_ADMIN_PROTOCOL_MAX_RETRIES_KEY, 2); config.setInt( OMConfigKeys.OZONE_OM_ADMIN_PROTOCOL_WAIT_BETWEEN_RETRIES_KEY, 100); - cluster.setConf(config); GenericTestUtils.LogCapturer omLog = GenericTestUtils.LogCapturer.captureLogs(OzoneManager.LOG); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java index 37fec8dcda72..97512fec40c0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java @@ -123,7 +123,7 @@ public void testBucketOwner() throws Exception { ozoneBucket.getAcls(); //Add Acls OzoneAcl acl = new OzoneAcl(USER, "testuser", - IAccessAuthorizer.ACLType.ALL, DEFAULT); + DEFAULT, IAccessAuthorizer.ACLType.ALL); ozoneBucket.addAcl(acl); } } @@ -179,7 +179,7 @@ public void testNonBucketNonVolumeOwner() throws Exception { OzoneVolume volume = client.getObjectStore().getVolume("volume1"); OzoneBucket ozoneBucket = volume.getBucket("bucket1"); OzoneAcl acl = new OzoneAcl(USER, "testuser1", - IAccessAuthorizer.ACLType.ALL, DEFAULT); + DEFAULT, IAccessAuthorizer.ACLType.ALL); ozoneBucket.addAcl(acl); }, "Add Acls as non-volume and non-bucket owner should fail"); } @@ -202,7 +202,7 @@ public void testVolumeOwner() throws Exception { ozoneBucket.getAcls(); //Add Acls OzoneAcl acl = new OzoneAcl(USER, "testuser2", - IAccessAuthorizer.ACLType.ALL, DEFAULT); + DEFAULT, IAccessAuthorizer.ACLType.ALL); 
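// --------------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): the ACL hunks in this
// diff move the OzoneAcl constructor from (type, name, ACLType, scope) to
// (type, name, scope, ACLType...), and the varargs tail also replaces the older
// BitSet-based overload. A minimal before/after outline using only call shapes that
// appear in the surrounding hunks; DEFAULT and ACCESS are the statically imported
// scope constants these tests already use.
//
//   // old argument order (rights before scope):
//   new OzoneAcl(USER, "testuser2", IAccessAuthorizer.ACLType.ALL, DEFAULT);
//
//   // new argument order (scope first, then one or more rights as varargs):
//   new OzoneAcl(USER, "testuser2", DEFAULT, IAccessAuthorizer.ACLType.ALL);
//   new OzoneAcl(ACLIdentityType.GROUP, "dev", ACCESS, READ, WRITE);
// --------------------------------------------------------------------------------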
ozoneBucket.addAcl(acl); //Bucket Delete volume.deleteBucket("bucket2"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 67ab3169b69c..efa2963842d4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -24,7 +24,6 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -37,6 +36,8 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -44,9 +45,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -87,7 +86,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneObj; @@ -110,7 +108,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; @@ -118,13 +118,17 @@ import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.assertj.core.api.Assertions.assertThat; import static 
org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -164,7 +168,7 @@ public class TestKeyManagerImpl { private static final String KEY_NAME = "key1"; private static final String BUCKET_NAME = "bucket1"; private static final String BUCKET2_NAME = "bucket2"; - private static final String VERSIONED_BUCKET_NAME = "versionedBucket1"; + private static final String VERSIONED_BUCKET_NAME = "versionedbucket1"; private static final String VOLUME_NAME = "vol1"; private static OzoneManagerProtocol writeClient; private static OzoneManager om; @@ -176,6 +180,9 @@ public static void setUp() throws Exception { dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); + final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, + conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class); nodeManager = new MockNodeManager(true, 10); NodeSchema[] schemas = new NodeSchema[] @@ -224,9 +231,6 @@ public static void setUp() throws Exception { new SCMException("SafeModePrecheck failed for allocateBlock", ResultCodes.SAFE_MODE_EXCEPTION)); createVolume(VOLUME_NAME); - createBucket(VOLUME_NAME, BUCKET_NAME, false); - createBucket(VOLUME_NAME, BUCKET2_NAME, false); - createBucket(VOLUME_NAME, VERSIONED_BUCKET_NAME, true); } @AfterAll @@ -237,21 +241,21 @@ public static void cleanup() throws Exception { FileUtils.deleteDirectory(dir); } + @BeforeEach + public void init() throws Exception { + createBucket(VOLUME_NAME, BUCKET_NAME, false); + createBucket(VOLUME_NAME, BUCKET2_NAME, false); + createBucket(VOLUME_NAME, VERSIONED_BUCKET_NAME, true); + } + @AfterEach public void cleanupTest() throws IOException { mockContainerClient(); - List fileStatuses = keyManager - .listStatus(createBuilder().setKeyName("").build(), true, "", 100000); - for (OzoneFileStatus fileStatus : fileStatuses) { - if (fileStatus.isFile()) { - writeClient.deleteKey( - createKeyArgs(fileStatus.getKeyInfo().getKeyName())); - } else { - writeClient.deleteKey(createKeyArgs(OzoneFSUtils - .addTrailingSlashIfNeeded( - fileStatus.getKeyInfo().getKeyName()))); - } - } + org.apache.hadoop.fs.Path volumePath = new org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, VOLUME_NAME); + FileSystem fs = FileSystem.get(conf); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); } private static void mockContainerClient() { @@ -527,7 +531,7 @@ public void testPrefixAclOps() throws IOException { .build(); OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); + ACCESS, ACLType.READ); writeClient.addAcl(ozPrefix1, ozAcl1); List ozAclGet = writeClient.getAcl(ozPrefix1); @@ -535,24 +539,13 @@ public void testPrefixAclOps() throws IOException { assertEquals(ozAcl1, ozAclGet.get(0)); List acls = new ArrayList<>(); - OzoneAcl ozAcl2 = new OzoneAcl(ACLIdentityType.USER, "admin", - ACLType.ALL, ACCESS); + OzoneAcl ozAcl2 = new OzoneAcl(ACLIdentityType.USER, "admin", ACCESS, ACLType.ALL); - BitSet rwRights = new BitSet(); - rwRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - rwRights.set(IAccessAuthorizer.ACLType.READ.ordinal()); - OzoneAcl ozAcl3 = new 
OzoneAcl(ACLIdentityType.GROUP, "dev", - rwRights, ACCESS); + OzoneAcl ozAcl3 = new OzoneAcl(ACLIdentityType.GROUP, "dev", ACCESS, READ, WRITE); - BitSet wRights = new BitSet(); - wRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - OzoneAcl ozAcl4 = new OzoneAcl(ACLIdentityType.GROUP, "dev", - wRights, ACCESS); + OzoneAcl ozAcl4 = new OzoneAcl(ACLIdentityType.GROUP, "dev", ACCESS, WRITE); - BitSet rRights = new BitSet(); - rRights.set(IAccessAuthorizer.ACLType.READ.ordinal()); - OzoneAcl ozAcl5 = new OzoneAcl(ACLIdentityType.GROUP, "dev", - rRights, ACCESS); + OzoneAcl ozAcl5 = new OzoneAcl(ACLIdentityType.GROUP, "dev", ACCESS, READ); acls.add(ozAcl2); acls.add(ozAcl3); @@ -624,7 +617,7 @@ public void testInvalidPrefixAcl() throws IOException { // Invalid prefix not ending with "/" String invalidPrefix = "invalid/pf"; OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); + ACCESS, ACLType.READ); OzoneObj ozInvalidPrefix = new OzoneObjInfo.Builder() .setVolumeName(volumeName) @@ -637,7 +630,7 @@ public void testInvalidPrefixAcl() throws IOException { // add acl with invalid prefix name Exception ex = assertThrows(OMException.class, () -> writeClient.addAcl(ozInvalidPrefix, ozAcl1)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() .setVolumeName(volumeName) @@ -655,7 +648,7 @@ public void testInvalidPrefixAcl() throws IOException { // get acl with invalid prefix name ex = assertThrows(OMException.class, () -> writeClient.getAcl(ozInvalidPrefix)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); // set acl with invalid prefix name List ozoneAcls = new ArrayList(); @@ -663,12 +656,12 @@ public void testInvalidPrefixAcl() throws IOException { ex = assertThrows(OMException.class, () -> writeClient.setAcl(ozInvalidPrefix, ozoneAcls)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); // remove acl with invalid prefix name ex = assertThrows(OMException.class, () -> writeClient.removeAcl(ozInvalidPrefix, ozAcl1)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); } @Test @@ -688,7 +681,7 @@ public void testLongestPrefixPath() throws IOException { .build(); OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); + ACCESS, ACLType.READ); writeClient.addAcl(ozPrefix1, ozAcl1); OzoneObj ozFile1 = new OzoneObjInfo.Builder() @@ -976,12 +969,11 @@ public void testListStatusWithTableCache() throws Exception { if (i % 2 == 0) { // Add to DB OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKeyInDB + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); } else { // Add to TableCache OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKeyInCache + i, - HddsProtos.ReplicationType.RATIS, ONE, + RatisReplicationConfig.getInstance(ONE), metadataManager); } } @@ -1012,8 +1004,10 @@ public void testListStatusWithTableCache() throws Exception { } } - @Test - public void testListStatusWithTableCacheRecursive() throws Exception { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testListStatusWithTableCacheRecursive(boolean 
enablePath) throws Exception { + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, enablePath); String keyNameDir1 = "dir1"; OmKeyArgs keyArgsDir1 = createBuilder().setKeyName(keyNameDir1).build(); @@ -1048,13 +1042,12 @@ public void testListStatusWithTableCacheRecursive() throws Exception { OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInDB + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); } else { // Add to TableCache OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i, - HddsProtos.ReplicationType.RATIS, ONE, + RatisReplicationConfig.getInstance(ONE), metadataManager); } } @@ -1092,13 +1085,12 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKey + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); existKeySet.add(prefixKey + i); } else { OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKey + i, - HddsProtos.ReplicationType.RATIS, ONE, + RatisReplicationConfig.getInstance(ONE), metadataManager); String key = metadataManager.getOzoneKey( @@ -1199,8 +1191,10 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { assertTrue(existKeySet.isEmpty()); } - @Test - public void testListStatus() throws IOException { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testListStatus(boolean enablePath) throws IOException { + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, enablePath); String superDir = RandomStringUtils.randomAlphabetic(5); int numDirectories = 5; @@ -1446,8 +1440,7 @@ public void testRefreshPipeline() throws Exception { when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", - "b1", "k1", ReplicationType.RATIS, - ReplicationFactor.THREE); + "b1", "k1", RatisReplicationConfig.getInstance(THREE)).build(); // Add block to key. List omKeyLocationInfoList = new ArrayList<>(); @@ -1501,8 +1494,7 @@ public void testRefreshPipelineException() throws Exception { OMPerformanceMetrics metrics = mock(OMPerformanceMetrics.class); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", - "b1", "k1", ReplicationType.RATIS, - ReplicationFactor.THREE); + "b1", "k1", RatisReplicationConfig.getInstance(THREE)).build(); // Add block to key. 
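// --------------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): throughout this diff,
// OM test helpers stop taking a (ReplicationType, ReplicationFactor) pair and take a
// single ReplicationConfig instead; createOmKeyInfo now also returns a builder, so
// callers append .build(). A minimal outline of the new call shapes, limited to
// helpers that appear in the surrounding hunks:
//
//   OmKeyInfo keyInfo = OMRequestTestUtils
//       .createOmKeyInfo("v1", "b1", "k1", RatisReplicationConfig.getInstance(THREE))
//       .build();
//
//   // the same config can be built from the proto enums, as TestLDBCli does:
//   ReplicationConfig cfg = ReplicationConfig.fromProtoTypeAndFactor(
//       STAND_ALONE, HddsProtos.ReplicationFactor.ONE);
// --------------------------------------------------------------------------------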
List omKeyLocationInfoList = new ArrayList<>(); @@ -1702,7 +1694,8 @@ private OmKeyArgs.Builder createBuilder(String bucketName) StandaloneReplicationConfig.getInstance(ONE)) .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), ALL, ALL)) - .setVolumeName(VOLUME_NAME); + .setVolumeName(VOLUME_NAME) + .setOwnerName(ugi.getShortUserName()); } private RequestContext currentUserReads() throws IOException { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index 83eac0ab288b..e3bb5b5bccb8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -45,6 +45,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -67,6 +68,7 @@ public class TestKeyPurging { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -77,7 +79,6 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java index be972557f4a4..204c0ee66818 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java @@ -63,6 +63,8 @@ public class TestListKeys { private static OzoneConfiguration conf; private static OzoneBucket legacyOzoneBucket; + + private static OzoneBucket obsOzoneBucket; private static OzoneClient client; /** @@ -86,6 +88,10 @@ public static void init() throws Exception { legacyOzoneBucket = TestDataUtil .createVolumeAndBucket(client, BucketLayout.LEGACY); + // create a volume and a OBJECT_STORE bucket + obsOzoneBucket = TestDataUtil + .createVolumeAndBucket(client, BucketLayout.OBJECT_STORE); + initFSNameSpace(); } @@ -99,6 +105,7 @@ public static void teardownClass() { private static void initFSNameSpace() throws Exception { buildNameSpaceTree(legacyOzoneBucket); + buildNameSpaceTree(obsOzoneBucket); } /** @@ -108,9 +115,9 @@ private static void initFSNameSpace() throws Exception { * | * a1 * | - * ----------------------------------- - * | | | - * b1 b2 b3 + * -------------------------------------------------------- + * | | | | + * b1 b2 b3 b4 * ------- --------- ----------- * | | | | | | | | * c1 c2 d1 d2 d3 e1 e2 e3 @@ -125,25 +132,27 @@ private static void initFSNameSpace() throws Exception { private static void buildNameSpaceTree(OzoneBucket ozoneBucket) throws Exception { LinkedList keys = new LinkedList<>(); - keys.add("/a1/b1/c1111.tx"); - keys.add("/a1/b1/c1222.tx"); - 
keys.add("/a1/b1/c1333.tx"); - keys.add("/a1/b1/c1444.tx"); - keys.add("/a1/b1/c1555.tx"); - keys.add("/a1/b1/c1/c1.tx"); - keys.add("/a1/b1/c12/c2.tx"); - keys.add("/a1/b1/c12/c3.tx"); - - keys.add("/a1/b2/d1/d11.tx"); - keys.add("/a1/b2/d2/d21.tx"); - keys.add("/a1/b2/d2/d22.tx"); - keys.add("/a1/b2/d3/d31.tx"); - - keys.add("/a1/b3/e1/e11.tx"); - keys.add("/a1/b3/e2/e21.tx"); - keys.add("/a1/b3/e3/e31.tx"); + keys.add("a1/b1/c1111.tx"); + keys.add("a1/b1/c1222.tx"); + keys.add("a1/b1/c1333.tx"); + keys.add("a1/b1/c1444.tx"); + keys.add("a1/b1/c1555.tx"); + keys.add("a1/b1/c1/c1.tx"); + keys.add("a1/b1/c12/c2.tx"); + keys.add("a1/b1/c12/c3.tx"); + + keys.add("a1/b2/d1/d11.tx"); + keys.add("a1/b2/d2/d21.tx"); + keys.add("a1/b2/d2/d22.tx"); + keys.add("a1/b2/d3/d31.tx"); + + keys.add("a1/b3/e1/e11.tx"); + keys.add("a1/b3/e2/e21.tx"); + keys.add("a1/b3/e3/e31.tx"); createKeys(ozoneBucket, keys); + + ozoneBucket.createDirectory("a1/b4/"); } private static Stream shallowListDataWithTrailingSlash() { @@ -186,6 +195,58 @@ private static Stream shallowListDataWithTrailingSlash() { "a1/b1/c1333.tx", "a1/b1/c1444.tx", "a1/b1/c1555.tx" + ))), + + // Case-7: StartKey is empty, return key that is same as keyPrefix. + of("a1/b4/", "", newLinkedList(Arrays.asList( + "a1/b4/" + ))) + ); + } + + private static Stream shallowListObsDataWithTrailingSlash() { + return Stream.of( + + // Case-1: StartKey is less than prefixKey, return emptyList. + of("a1/b2/", "a1", newLinkedList(Collections.emptyList())), + + // Case-2: StartKey is empty, return all immediate node. + of("a1/b2/", "", newLinkedList(Arrays.asList( + "a1/b2/d1/", + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-3: StartKey is same as prefixKey, return all immediate nodes. + of("a1/b2/", "a1/b2", newLinkedList(Arrays.asList( + "a1/b2/d1/", + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-4: StartKey is greater than prefixKey + of("a1/b2/", "a1/b2/d2/d21.tx", newLinkedList(Arrays.asList( + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-5: StartKey reaches last element, return emptyList + of("a1/b2/", "a1/b2/d3/d31.tx", newLinkedList( + Collections.emptyList() + )), + + // Case-6: Mix result + of("a1/b1/", "a1/b1/c12", newLinkedList(Arrays.asList( + "a1/b1/c12/", + "a1/b1/c1222.tx", + "a1/b1/c1333.tx", + "a1/b1/c1444.tx", + "a1/b1/c1555.tx" + ))), + + // Case-7: StartKey is empty, return key that is same as keyPrefix. 
+ of("a1/b4/", "", newLinkedList(Arrays.asList( + "a1/b4/" ))) ); } @@ -252,6 +313,11 @@ private static Stream shallowListDataWithoutTrailingSlash() { of("a1/b1/c12", "", newLinkedList(Arrays.asList( "a1/b1/c12/", "a1/b1/c1222.tx" + ))), + + // Case-10: + of("a1/b4", "", newLinkedList(Arrays.asList( + "a1/b4/" ))) ); @@ -264,11 +330,19 @@ public void testShallowListKeysWithPrefixTrailingSlash(String keyPrefix, checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket); } + @ParameterizedTest + @MethodSource("shallowListObsDataWithTrailingSlash") + public void testShallowListObsKeysWithPrefixTrailingSlash(String keyPrefix, + String startKey, List expectedKeys) throws Exception { + checkKeyShallowList(keyPrefix, startKey, expectedKeys, obsOzoneBucket); + } + @ParameterizedTest @MethodSource("shallowListDataWithoutTrailingSlash") public void testShallowListKeysWithoutPrefixTrailingSlash(String keyPrefix, String startKey, List expectedKeys) throws Exception { checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, obsOzoneBucket); } private void checkKeyShallowList(String keyPrefix, String startKey, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java index f499e3569c8b..11594f3ef11c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java @@ -63,6 +63,8 @@ public class TestListKeysWithFSO { private static OzoneBucket fsoOzoneBucket; private static OzoneBucket legacyOzoneBucket2; private static OzoneBucket fsoOzoneBucket2; + private static OzoneBucket emptyLegacyOzoneBucket; + private static OzoneBucket emptyFsoOzoneBucket; private static OzoneClient client; /** @@ -105,6 +107,10 @@ public static void init() throws Exception { ozoneVolume.createBucket(fsoBucketName, omBucketArgs); fsoOzoneBucket2 = ozoneVolume.getBucket(fsoBucketName); + fsoBucketName = "bucket" + RandomStringUtils.randomNumeric(5); + ozoneVolume.createBucket(fsoBucketName, omBucketArgs); + emptyFsoOzoneBucket = ozoneVolume.getBucket(fsoBucketName); + builder = BucketArgs.newBuilder(); builder.setStorageType(StorageType.DISK); builder.setBucketLayout(BucketLayout.LEGACY); @@ -113,6 +119,10 @@ public static void init() throws Exception { ozoneVolume.createBucket(legacyBucketName, omBucketArgs); legacyOzoneBucket2 = ozoneVolume.getBucket(legacyBucketName); + legacyBucketName = "bucket" + RandomStringUtils.randomNumeric(5); + ozoneVolume.createBucket(legacyBucketName, omBucketArgs); + emptyLegacyOzoneBucket = ozoneVolume.getBucket(legacyBucketName); + initFSNameSpace(); } @@ -479,6 +489,23 @@ public void testShallowListKeys() throws Exception { expectedKeys = getExpectedKeyShallowList(keyPrefix, startKey, legacyOzoneBucket); checkKeyShallowList(keyPrefix, startKey, expectedKeys, fsoOzoneBucket); + + // case-7: keyPrefix corresponds to multiple existing keys and + // startKey is null in empty bucket + keyPrefix = "a1/b1/c12"; + startKey = null; + // a1/b1/c1222.tx + expectedKeys = + getExpectedKeyShallowList(keyPrefix, startKey, emptyLegacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, emptyFsoOzoneBucket); + + // case-8: keyPrefix corresponds to multiple existing keys and + // startKey is null + keyPrefix = 
"a1/b1/c12"; + // a1/b1/c1222.tx + expectedKeys = + getExpectedKeyShallowList(keyPrefix, startKey, legacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, fsoOzoneBucket); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java index 52cb9287cc02..20977f9d4834 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java @@ -16,10 +16,10 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -29,24 +29,30 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.List; +import java.util.stream.Stream; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.apache.hadoop.ozone.OzoneConfigKeys. - OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * A simple test that asserts that list status output is sorted. */ @Timeout(1200) public class TestListStatus { + private static final Logger LOG = LoggerFactory.getLogger(TestListStatus.class); private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; private static OzoneBucket fsoOzoneBucket; private static OzoneClient client; @@ -54,11 +60,11 @@ public class TestListStatus { * Create a MiniDFSCluster for testing. *

    * - * @throws IOException + * @throws IOException in case of I/O error */ @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); cluster = MiniOzoneCluster.newBuilder(conf).build(); @@ -69,7 +75,7 @@ public static void init() throws Exception { fsoOzoneBucket = TestDataUtil .createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED); - // Set the number of keys to be processed during batch operate. + // Set the number of keys to be processed during batch operated. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); buildNameSpaceTree(fsoOzoneBucket); @@ -83,44 +89,30 @@ public static void teardownClass() { } } - @Test - public void testSortedListStatus() throws Exception { - // a) test if output is sorted - checkKeyList("", "", 1000, 10, false); - - // b) number of keys returns is expected - checkKeyList("", "", 2, 2, false); - - // c) check if full prefix works - checkKeyList("a1", "", 100, 3, false); - - // d) check if full prefix with numEntries work - checkKeyList("a1", "", 2, 2, false); - - // e) check if existing start key >>> - checkKeyList("a1", "a1/a12", 100, 2, false); - - // f) check with non-existing start key - checkKeyList("", "a7", 100, 6, false); - - // g) check if half prefix works - checkKeyList("b", "", 100, 4, true); - - // h) check half prefix with non-existing start key - checkKeyList("b", "b5", 100, 2, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "c", 100, 0, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "b/g5", 100, 4, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "c/g5", 100, 0, true); + @MethodSource("sortedListStatusParametersSource") + @ParameterizedTest(name = "{index} {5}") + public void testSortedListStatus(String keyPrefix, String startKey, int numEntries, int expectedNumKeys, + boolean isPartialPrefix, String testName) throws Exception { + checkKeyList(keyPrefix, startKey, numEntries, expectedNumKeys, isPartialPrefix); + } - // j) check prefix with non-existing prefix key - // and non-existing parent in start key - checkKeyList("a1/a111", "a1/a111/a100", 100, 0, true); + private static Stream sortedListStatusParametersSource() { + return Stream.of( + arguments("", "", 1000, 10, false, "Test if output is sorted"), + arguments("", "", 2, 2, false, "Number of keys returns is expected"), + arguments("a1", "", 100, 3, false, "Check if the full prefix works"), + arguments("a1", "", 2, 2, false, "Check if full prefix with numEntries work"), + arguments("a1", "a1/a12", 100, 2, false, "Check if existing start key >>>"), + arguments("", "a7", 100, 6, false, "Check with a non-existing start key"), + arguments("b", "", 100, 4, true, "Check if half-prefix works"), + arguments("b", "b5", 100, 2, true, "Check half prefix with non-existing start key"), + arguments("b", "c", 100, 0, true, "Check half prefix with non-existing parent in a start key"), + arguments("b", "b/g5", 100, 4, true, "Check half prefix with non-existing parent in a start key"), + arguments("b", "c/g5", 100, 0, true, "Check half prefix with non-existing parent in a start key"), + arguments("a1/a111", "a1/a111/a100", 100, 0, true, "Check prefix with a non-existing prefix key\n" + + " and non-existing parent in a start key"), + arguments("a1/a111", null, 100, 0, true, "Check start key is null") + ); } 
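// --------------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): several tests in this
// diff (TestListStatus, TestOmBucketReadWriteKeyOps, TestKeyManagerImpl) are rewritten
// as JUnit 5 parameterized tests, pairing a static Stream<Arguments> factory with
// @MethodSource and a display-name template. The class below is hypothetical
// scaffolding that only demonstrates the parameter plumbing, not Ozone behaviour.

import java.util.stream.Stream;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.params.provider.Arguments.arguments;

class ListStatusParameterizedSketch {

  // One row per case: keyPrefix, startKey, numEntries, expectedNumKeys.
  private static Stream<Arguments> cases() {
    return Stream.of(
        arguments("", "", 1000, 10),
        arguments("a1", "", 100, 3),
        arguments("b", "c", 100, 0));
  }

  @ParameterizedTest(name = "{index} prefix={0} startKey={1}")
  @MethodSource("cases")
  void expectedCountNeverExceedsRequestedEntries(String keyPrefix, String startKey,
      int numEntries, int expectedNumKeys) {
    // The real tests call bucket.listStatus(keyPrefix, false, startKey, numEntries, ...)
    // and assert on the returned statuses; this stand-in only checks the trivial
    // invariant that the expected key count cannot exceed the requested batch size.
    assertTrue(expectedNumKeys <= numEntries);
  }
}
// --------------------------------------------------------------------------------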
private static void createFile(OzoneBucket bucket, String keyName) @@ -131,6 +123,7 @@ private static void createFile(OzoneBucket bucket, String keyName) oos.flush(); } } + private static void buildNameSpaceTree(OzoneBucket ozoneBucket) throws Exception { /* @@ -172,33 +165,29 @@ private static void buildNameSpaceTree(OzoneBucket ozoneBucket) createFile(ozoneBucket, "/b8"); } - private void checkKeyList(String keyPrefix, String startKey, - long numEntries, int expectedNumKeys, - boolean isPartialPrefix) - throws Exception { + private void checkKeyList(String keyPrefix, String startKey, long numEntries, int expectedNumKeys, + boolean isPartialPrefix) throws Exception { List statuses = fsoOzoneBucket.listStatus(keyPrefix, false, startKey, numEntries, isPartialPrefix); assertEquals(expectedNumKeys, statuses.size()); - System.out.println("BEGIN:::keyPrefix---> " + keyPrefix + ":::---> " + - startKey); + LOG.info("BEGIN:::keyPrefix---> {} :::---> {}", keyPrefix, startKey); for (int i = 0; i < statuses.size() - 1; i++) { OzoneFileStatus stCurr = statuses.get(i); OzoneFileStatus stNext = statuses.get(i + 1); - System.out.println("status:" + stCurr); + LOG.info("status: {}", stCurr); assertThat(stCurr.getPath().compareTo(stNext.getPath())).isLessThan(0); } if (!statuses.isEmpty()) { OzoneFileStatus stNext = statuses.get(statuses.size() - 1); - System.out.println("status:" + stNext); + LOG.info("status: {}", stNext); } - System.out.println("END:::keyPrefix---> " + keyPrefix + ":::---> " + - startKey); + LOG.info("END:::keyPrefix---> {}:::---> {}", keyPrefix, startKey); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java index b8e115864727..ae97b3f7b907 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java @@ -50,6 +50,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.LAYOUT_VERSION_KEY; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.waitForFinalization; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion; @@ -91,13 +92,13 @@ class TestOMBucketLayoutUpgrade { @BeforeAll void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, fromLayoutVersion); String omServiceId = UUID.randomUUID().toString(); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(3) - .setNumDatanodes(1) - .setOmLayoutVersion(fromLayoutVersion) - .build(); + .setNumDatanodes(1); + cluster = builder.build(); cluster.waitForClusterToBeReady(); ozoneManager = cluster.getOzoneManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index 68ed3536a643..f0f4744e8c91 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -414,13 +414,8 @@ public void testWriteDbDataToStream() throws Exception { Path expectedLog = Paths.get(compactionLogDir, "expected" + COMPACTION_LOG_FILE_NAME_SUFFIX); String expectedLogStr = truncateFileName(metaDirLength, expectedLog); - Path unExpectedLog = Paths.get(compactionLogDir, "unexpected" + - COMPACTION_LOG_FILE_NAME_SUFFIX); - String unExpectedLogStr = truncateFileName(metaDirLength, unExpectedLog); Path expectedSst = Paths.get(sstBackupDir, "expected.sst"); String expectedSstStr = truncateFileName(metaDirLength, expectedSst); - Path unExpectedSst = Paths.get(sstBackupDir, "unexpected.sst"); - String unExpectedSstStr = truncateFileName(metaDirLength, unExpectedSst); // put "expected" fabricated files onto the fs before the files get // copied to the temp dir. @@ -436,15 +431,6 @@ public void testWriteDbDataToStream() throws Exception { // with the snapshot data. doNothing().when(checkpoint).cleanupCheckpoint(); realCheckpoint.set(checkpoint); - - // put "unexpected" fabricated files onto the fs after the files - // get copied to the temp dir. Since these appear in the "real" - // dir after the copy, they shouldn't exist in the final file - // set. That will show that the copy only happened from the temp dir. - Files.write(unExpectedLog, - "fabricatedData".getBytes(StandardCharsets.UTF_8)); - Files.write(unExpectedSst, - "fabricatedData".getBytes(StandardCharsets.UTF_8)); return checkpoint; }); @@ -460,10 +446,6 @@ public void testWriteDbDataToStream() throws Exception { long tmpHardLinkFileCount = tmpHardLinkFileCount(); omDbCheckpointServletMock.doGet(requestMock, responseMock); assertEquals(tmpHardLinkFileCount, tmpHardLinkFileCount()); - - // Verify that tarball request count reaches to zero once doGet completes. - assertEquals(0, - dbStore.getRocksDBCheckpointDiffer().getTarballRequestCount()); dbCheckpoint = realCheckpoint.get(); // Untar the file into a temp folder to be examined. @@ -528,15 +510,7 @@ public void testWriteDbDataToStream() throws Exception { getFiles(Paths.get(metaDir.toString(), OM_SNAPSHOT_DIR), metaDirLength); assertThat(finalFullSet).contains(expectedLogStr); assertThat(finalFullSet).contains(expectedSstStr); - assertThat(initialFullSet).contains(unExpectedLogStr); - assertThat(initialFullSet).contains(unExpectedSstStr); - - // Remove the dummy files that should not have been copied over - // from the expected data. 
- initialFullSet.remove(unExpectedLogStr); - initialFullSet.remove(unExpectedSstStr); - assertEquals(initialFullSet, finalFullSet, - "expected snapshot files not found"); + assertEquals(initialFullSet, finalFullSet, "expected snapshot files not found"); } private static long tmpHardLinkFileCount() throws IOException { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java index 991b3a66fb03..01ba4db399fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.ObjectStore; @@ -42,11 +44,13 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OmUtils.EPOCH_ID_SHIFT; import static org.apache.hadoop.ozone.OmUtils.EPOCH_WHEN_RATIS_NOT_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests OM epoch generation for when Ratis is not enabled. @@ -145,6 +149,48 @@ public void testUniqueTrxnIndexOnOMRestart() throws Exception { assertEquals(4, om.getLastTrxnIndexForNonRatis()); } + @Test + public void testIncreaseTrxnIndexBasedOnExistingDB() throws Exception { + // Set transactionInfo.getTerm() not -1 to mock the DB migrated from ratis cluster. + // When OM is first started from the existing ratis DB, the transaction index for + // requests should not start from 0. It should incrementally increase from the last + // transaction index which was stored in DB transactionInfoTable before started. 
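The comment above explains that an OM restarted on a DB migrated from a Ratis cluster must resume its transaction index from the value persisted in the transaction-info table rather than from zero. A minimal plain-Java sketch of that resume-from-persisted-index idea (ResumableIndex and its fields are hypothetical, not Ozone's TransactionInfo API):

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch: an index generator that resumes from a persisted value on restart.
final class ResumableIndex {
  private final AtomicLong index;

  // lastPersistedIndex stands in for the value read back from the transaction-info table.
  ResumableIndex(long lastPersistedIndex) {
    this.index = new AtomicLong(lastPersistedIndex);
  }

  long next() {
    return index.incrementAndGet();
  }

  public static void main(String[] args) {
    long persisted = 7;                  // index stored before the restart
    ResumableIndex generator = new ResumableIndex(persisted);
    generator.next();                    // e.g. create request -> 8
    long afterCommit = generator.next(); // e.g. commit request -> 9
    // Mirrors the test's initIndex + 2 assertion: the index continues, it does not reset to 0.
    System.out.println(afterCommit == persisted + 2);
  }
}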
+ + String volumeName = "volume" + RandomStringUtils.randomNumeric(5); + String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + String keyName = "key" + RandomStringUtils.randomNumeric(5); + + OzoneManager om = cluster.getOzoneManager(); + ObjectStore objectStore = client.getObjectStore(); + + objectStore.createVolume(volumeName); + OzoneVolume ozoneVolume = objectStore.getVolume(volumeName); + ozoneVolume.createBucket(bucketName); + + Table transactionInfoTable = om.getMetadataManager().getTransactionInfoTable(); + long initIndex = transactionInfoTable.get(TRANSACTION_INFO_KEY).getTransactionIndex(); + // Set transactionInfo.getTerm() = 1 to mock the DB migrated from ratis cluster + transactionInfoTable.put(TRANSACTION_INFO_KEY, TransactionInfo.valueOf(1, initIndex)); + TransactionInfo transactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY); + // Verify transaction term != -1 and index > 1 + assertEquals(1, transactionInfo.getTerm()); + assertTrue(initIndex > 1); + + // Restart the OM and create new object + cluster.restartOzoneManager(); + + String data = "random data"; + OzoneOutputStream ozoneOutputStream = ozoneVolume.getBucket(bucketName).createKey(keyName, data.length()); + ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); + ozoneOutputStream.close(); + + // Transaction index after OM restart is incremented by 2 (create and commit op) from the last + // transaction index before OM restart rather than from 0. + // So, the transactionIndex should be (initIndex + 2) rather than (0 + 2) + assertEquals(initIndex + 2, + om.getMetadataManager().getTransactionInfoTable().get(TRANSACTION_INFO_KEY).getTransactionIndex()); + } + @Test public void testEpochIntegrationInObjectID() throws Exception { // Create a volume and check the objectID has the epoch as diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index f3f0c7d69b9c..bd5046bfc0bf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -155,7 +155,7 @@ public void init(TestInfo testInfo) throws Exception { omRatisConf.setLogAppenderWaitTimeMin(10); conf.setFromObject(omRatisConf); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java index fa84130c9d6f..ccf94bef3c80 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.LAYOUT_VERSION_KEY; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.assertClusterPrepared; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.waitForFinalization; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static 
org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion; import static org.apache.ozone.test.GenericTestUtils.waitFor; @@ -103,12 +104,12 @@ void testOMUpgradeFinalizationWithOneOMDown() throws Exception { private static MiniOzoneHAClusterImpl newCluster(OzoneConfiguration conf) throws IOException { - return (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(UUID.randomUUID().toString()) + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, INITIAL_VERSION.layoutVersion()); + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(UUID.randomUUID().toString()) .setNumOfOzoneManagers(3) - .setNumDatanodes(1) - .setOmLayoutVersion(INITIAL_VERSION.layoutVersion()) - .build(); + .setNumDatanodes(1); + return builder.build(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java index 2e58b6dbb731..5e3a3aa1980f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java @@ -820,10 +820,7 @@ private void verifyKeyInOpenFileTable(Table openFileTable, } else { OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey); assertNotNull(omKeyInfo, "Table is empty!"); - // used startsWith because the key format is, - // /fileName/ and clientID is not visible. - assertEquals(omKeyInfo.getKeyName(), fileName, - "Invalid Key: " + omKeyInfo.getObjectInfo()); + assertEquals(omKeyInfo.getFileName(), fileName, "Invalid file name: " + omKeyInfo.getObjectInfo()); assertEquals(parentID, omKeyInfo.getParentObjectID(), "Invalid Key"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index be2e0a96526e..9c7a0a7032bc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; @@ -52,6 +53,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.Map; @@ -213,7 +216,8 @@ public void testMultiPartCompleteUpload() throws Exception { } private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( - OzoneBucket bucket, String keyName) throws IOException { + OzoneBucket bucket, String keyName) + throws IOException, NoSuchAlgorithmException { OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); @@ -226,6 +230,9 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, 
data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) + .digest(data)).toLowerCase()); ozoneOutputStream.close(); if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) { @@ -245,7 +252,7 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( ozoneOutputStream.getCommitUploadPartInfo(); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 7b8d6653d9b2..247f7e751038 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -104,6 +104,7 @@ public void testAllocateCommit() throws Exception { .setDataSize(1000) .setAcls(new ArrayList<>()) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) + .setOwnerName("user" + RandomStringUtils.randomNumeric(5)) .build(); // 1st update, version 0 diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index 50ff9c36a0a3..e773bf7ed7f2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -50,6 +50,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; @@ -162,6 +165,9 @@ public static void setUp() throws Exception { mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class); mockScmContainerClient = mock(StorageContainerLocationProtocol.class); + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + when(mockScmBlockLocationProtocol.getNetworkTopology()).thenReturn( + factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1)); OmTestManagers omTestManagers = new OmTestManagers(conf, mockScmBlockLocationProtocol, mockScmContainerClient); @@ -247,10 +253,13 @@ private static void createVolume(String volumeName) throws IOException { } @BeforeEach - public void beforeEach() { + public void beforeEach() throws IOException { CONTAINER_ID.getAndIncrement(); reset(mockScmBlockLocationProtocol, mockScmContainerClient, mockDn1Protocol, mockDn2Protocol); + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + when(mockScmBlockLocationProtocol.getNetworkTopology()).thenReturn( + factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1)); 
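The multipart-upload change earlier in this section computes each part's ETag as the lower-case hex MD5 digest of the part data and completes the upload with ETags instead of part names. A JDK-only sketch of that digest step (EtagSketch is a hypothetical helper; the MD5 and "ETag" naming mirror the patch):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

final class EtagSketch {
  // Compute the lower-case hex MD5 digest of the part content.
  static String md5Hex(byte[] data) throws NoSuchAlgorithmException {
    byte[] digest = MessageDigest.getInstance("MD5").digest(data);
    StringBuilder hex = new StringBuilder(digest.length * 2);
    for (byte b : digest) {
      hex.append(String.format("%02x", b));
    }
    return hex.toString();
  }

  public static void main(String[] args) throws NoSuchAlgorithmException {
    byte[] part = "part-1 payload".getBytes(StandardCharsets.UTF_8);
    // In the test this value is stored as the stream's ETag metadata and later
    // passed to completeMultipartUpload instead of the part name.
    System.out.println(md5Hex(part));
  }
}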
when(mockDn1Protocol.getPipeline()).thenReturn(createPipeline(DN1)); when(mockDn2Protocol.getPipeline()).thenReturn(createPipeline(DN2)); } @@ -598,18 +607,40 @@ private ContainerProtos.DatanodeBlockID createBlockId(long containerId, private void mockWriteChunkResponse(XceiverClientSpi mockDnProtocol) throws IOException, ExecutionException, InterruptedException { - ContainerCommandResponseProto writeResponse = - ContainerCommandResponseProto.newBuilder() - .setWriteChunk(WriteChunkResponseProto.newBuilder().build()) - .setResult(Result.SUCCESS) - .setCmdType(Type.WriteChunk) - .build(); doAnswer(invocation -> - new XceiverClientReply(completedFuture(writeResponse))) + new XceiverClientReply( + completedFuture( + createWriteChunkResponse( + (ContainerCommandRequestProto)invocation.getArgument(0))))) .when(mockDnProtocol) .sendCommandAsync(argThat(matchCmd(Type.WriteChunk))); } + ContainerCommandResponseProto createWriteChunkResponse( + ContainerCommandRequestProto request) { + ContainerProtos.WriteChunkRequestProto writeChunk = request.getWriteChunk(); + + WriteChunkResponseProto.Builder builder = + WriteChunkResponseProto.newBuilder(); + if (writeChunk.hasBlock()) { + ContainerProtos.BlockData + blockData = writeChunk.getBlock().getBlockData(); + + GetCommittedBlockLengthResponseProto response = + GetCommittedBlockLengthResponseProto.newBuilder() + .setBlockID(blockData.getBlockID()) + .setBlockLength(blockData.getSize()) + .build(); + + builder.setCommittedBlockLength(response); + } + return ContainerCommandResponseProto.newBuilder() + .setWriteChunk(builder.build()) + .setResult(Result.SUCCESS) + .setCmdType(Type.WriteChunk) + .build(); + } + private ArgumentMatcher matchCmd(Type type) { return argument -> argument != null && argument.getCmdType() == type; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 85e7c2a76e5c..4619af1baa29 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -16,53 +16,25 @@ */ package org.apache.hadoop.ozone.om; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.BUCKET; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; -import static org.apache.ozone.test.MetricsAsserts.assertCounter; -import static org.apache.ozone.test.MetricsAsserts.getMetrics; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.spy; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import 
java.util.concurrent.TimeUnit; - import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.ObjectStore; @@ -79,20 +51,48 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; -import org.apache.ozone.test.MetricsAsserts; import org.apache.ozone.test.GenericTestUtils; import org.assertj.core.util.Lists; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.BUCKET; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; +import static org.apache.ozone.test.MetricsAsserts.getLongCounter; +import static org.apache.ozone.test.MetricsAsserts.getMetrics; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.spy; + /** * Test for OM metrics. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) public class TestOmMetrics { private MiniOzoneCluster cluster; @@ -106,16 +106,21 @@ public class TestOmMetrics { private final OMException exception = new OMException("dummyException", OMException.ResultCodes.TIMEOUT); private OzoneClient client; - /** * Create a MiniDFSCluster for testing. */ - @BeforeEach + + @BeforeAll public void setup() throws Exception { conf = new OzoneConfiguration(); conf.setTimeDuration(OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL, 1000, TimeUnit.MILLISECONDS); - clusterBuilder = MiniOzoneCluster.newBuilder(conf).withoutDatanodes(); + // Speed up background directory deletion for this test. + conf.setTimeDuration(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000, TimeUnit.MILLISECONDS); + // For testing fs operations with legacy buckets. + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + clusterBuilder = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5); + startCluster(); } private void startCluster() throws Exception { @@ -130,7 +135,7 @@ private void startCluster() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterEach + @AfterAll public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -140,22 +145,38 @@ public void shutdown() { @Test public void testVolumeOps() throws Exception { - startCluster(); VolumeManager volumeManager = (VolumeManager) HddsWhiteboxTestUtils.getInternalState( ozoneManager, "volumeManager"); VolumeManager mockVm = spy(volumeManager); + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumVolumeOps = getLongCounter("NumVolumeOps", omMetrics); + long initialNumVolumeCreates = getLongCounter("NumVolumeCreates", omMetrics); + long initialNumVolumeUpdates = getLongCounter("NumVolumeUpdates", omMetrics); + long initialNumVolumeInfos = getLongCounter("NumVolumeInfos", omMetrics); + long initialNumVolumeDeletes = getLongCounter("NumVolumeDeletes", omMetrics); + long initialNumVolumeLists = getLongCounter("NumVolumeLists", omMetrics); + long initialNumVolumes = getLongCounter("NumVolumes", omMetrics); + + long initialNumVolumeCreateFails = getLongCounter("NumVolumeCreateFails", omMetrics); + long initialNumVolumeUpdateFails = getLongCounter("NumVolumeUpdateFails", omMetrics); + long initialNumVolumeInfoFails = getLongCounter("NumVolumeInfoFails", omMetrics); + long initialNumVolumeDeleteFails = getLongCounter("NumVolumeDeleteFails", omMetrics); + long initialNumVolumeListFails = getLongCounter("NumVolumeListFails", omMetrics); + OmVolumeArgs volumeArgs = createVolumeArgs(); doVolumeOps(volumeArgs); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumeOps", 5L, omMetrics); - assertCounter("NumVolumeCreates", 1L, omMetrics); - assertCounter("NumVolumeUpdates", 1L, omMetrics); - assertCounter("NumVolumeInfos", 1L, omMetrics); - assertCounter("NumVolumeDeletes", 1L, omMetrics); - assertCounter("NumVolumeLists", 1L, omMetrics); - assertCounter("NumVolumes", 1L, omMetrics); + + omMetrics = getMetrics("OMMetrics"); + assertEquals(initialNumVolumeOps + 5, getLongCounter("NumVolumeOps", omMetrics)); + assertEquals(initialNumVolumeCreates + 1, getLongCounter("NumVolumeCreates", omMetrics)); + assertEquals(initialNumVolumeUpdates + 1, getLongCounter("NumVolumeUpdates", omMetrics)); + assertEquals(initialNumVolumeInfos + 1, getLongCounter("NumVolumeInfos", omMetrics)); + assertEquals(initialNumVolumeDeletes + 1, 
getLongCounter("NumVolumeDeletes", omMetrics)); + assertEquals(initialNumVolumeLists + 1, getLongCounter("NumVolumeLists", omMetrics)); + assertEquals(initialNumVolumes, getLongCounter("NumVolumes", omMetrics)); volumeArgs = createVolumeArgs(); writeClient.createVolume(volumeArgs); @@ -166,10 +187,8 @@ public void testVolumeOps() throws Exception { writeClient.deleteVolume(volumeArgs.getVolume()); omMetrics = getMetrics("OMMetrics"); - // Accounting 's3v' volume which is created by default. - assertCounter("NumVolumes", 3L, omMetrics); - + assertEquals(initialNumVolumes + 2, getLongCounter("NumVolumes", omMetrics)); // inject exception to test for Failure Metrics on the read path doThrow(exception).when(mockVm).getVolumeInfo(any()); @@ -178,61 +197,78 @@ public void testVolumeOps() throws Exception { HddsWhiteboxTestUtils.setInternalState(ozoneManager, "volumeManager", mockVm); + // inject exception to test for Failure Metrics on the write path - mockWritePathExceptions(OmVolumeArgs.class); + OMMetadataManager metadataManager = mockWritePathExceptions(OmVolumeArgs.class); volumeArgs = createVolumeArgs(); doVolumeOps(volumeArgs); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumeOps", 14L, omMetrics); - assertCounter("NumVolumeCreates", 5L, omMetrics); - assertCounter("NumVolumeUpdates", 2L, omMetrics); - assertCounter("NumVolumeInfos", 2L, omMetrics); - assertCounter("NumVolumeDeletes", 3L, omMetrics); - assertCounter("NumVolumeLists", 2L, omMetrics); - - assertCounter("NumVolumeCreateFails", 1L, omMetrics); - assertCounter("NumVolumeUpdateFails", 1L, omMetrics); - assertCounter("NumVolumeInfoFails", 1L, omMetrics); - assertCounter("NumVolumeDeleteFails", 1L, omMetrics); - assertCounter("NumVolumeListFails", 1L, omMetrics); - - // As last call for volumesOps does not increment numVolumes as those are - // failed. 
- assertCounter("NumVolumes", 3L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumVolumes", 3L, omMetrics); - - + assertEquals(initialNumVolumeOps + 14, getLongCounter("NumVolumeOps", omMetrics)); + assertEquals(initialNumVolumeCreates + 5, getLongCounter("NumVolumeCreates", omMetrics)); + assertEquals(initialNumVolumeUpdates + 2, getLongCounter("NumVolumeUpdates", omMetrics)); + assertEquals(initialNumVolumeInfos + 2, getLongCounter("NumVolumeInfos", omMetrics)); + assertEquals(initialNumVolumeDeletes + 3, getLongCounter("NumVolumeDeletes", omMetrics)); + assertEquals(initialNumVolumeLists + 2, getLongCounter("NumVolumeLists", omMetrics)); + + assertEquals(initialNumVolumeCreateFails + 1, getLongCounter("NumVolumeCreateFails", omMetrics)); + assertEquals(initialNumVolumeUpdateFails + 1, getLongCounter("NumVolumeUpdateFails", omMetrics)); + assertEquals(initialNumVolumeInfoFails + 1, getLongCounter("NumVolumeInfoFails", omMetrics)); + assertEquals(initialNumVolumeDeleteFails + 1, getLongCounter("NumVolumeDeleteFails", omMetrics)); + assertEquals(initialNumVolumeListFails + 1, getLongCounter("NumVolumeListFails", omMetrics)); + assertEquals(initialNumVolumes + 2, getLongCounter("NumVolumes", omMetrics)); + + // restore state + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "volumeManager", volumeManager); + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "metadataManager", metadataManager); } @Test public void testBucketOps() throws Exception { - startCluster(); BucketManager bucketManager = (BucketManager) HddsWhiteboxTestUtils.getInternalState( ozoneManager, "bucketManager"); BucketManager mockBm = spy(bucketManager); + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumBucketOps = getLongCounter("NumBucketOps", omMetrics); + long initialNumBucketCreates = getLongCounter("NumBucketCreates", omMetrics); + long initialNumBucketUpdates = getLongCounter("NumBucketUpdates", omMetrics); + long initialNumBucketInfos = getLongCounter("NumBucketInfos", omMetrics); + long initialNumBucketDeletes = getLongCounter("NumBucketDeletes", omMetrics); + long initialNumBucketLists = getLongCounter("NumBucketLists", omMetrics); + long initialNumBuckets = getLongCounter("NumBuckets", omMetrics); + long initialEcBucketCreateTotal = getLongCounter("EcBucketCreateTotal", omMetrics); + long initialEcBucketCreateFailsTotal = getLongCounter("EcBucketCreateFailsTotal", omMetrics); + + long initialNumBucketCreateFails = getLongCounter("NumBucketCreateFails", omMetrics); + long initialNumBucketUpdateFails = getLongCounter("NumBucketUpdateFails", omMetrics); + long initialNumBucketInfoFails = getLongCounter("NumBucketInfoFails", omMetrics); + long initialNumBucketDeleteFails = getLongCounter("NumBucketDeleteFails", omMetrics); + long initialNumBucketListFails = getLongCounter("NumBucketListFails", omMetrics); + OmBucketInfo bucketInfo = createBucketInfo(false); doBucketOps(bucketInfo); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBucketOps", 5L, omMetrics); - assertCounter("NumBucketCreates", 1L, omMetrics); - assertCounter("NumBucketUpdates", 1L, omMetrics); - assertCounter("NumBucketInfos", 1L, omMetrics); - assertCounter("NumBucketDeletes", 1L, omMetrics); - assertCounter("NumBucketLists", 1L, omMetrics); - assertCounter("NumBuckets", 0L, omMetrics); + omMetrics = getMetrics("OMMetrics"); + assertEquals(initialNumBucketOps + 5, getLongCounter("NumBucketOps", omMetrics)); + 
assertEquals(initialNumBucketCreates + 1, getLongCounter("NumBucketCreates", omMetrics)); + assertEquals(initialNumBucketUpdates + 1, getLongCounter("NumBucketUpdates", omMetrics)); + assertEquals(initialNumBucketInfos + 1, getLongCounter("NumBucketInfos", omMetrics)); + assertEquals(initialNumBucketDeletes + 1, getLongCounter("NumBucketDeletes", omMetrics)); + assertEquals(initialNumBucketLists + 1, getLongCounter("NumBucketLists", omMetrics)); + assertEquals(initialNumBuckets, getLongCounter("NumBuckets", omMetrics)); OmBucketInfo ecBucketInfo = createBucketInfo(true); writeClient.createBucket(ecBucketInfo); writeClient.deleteBucket(ecBucketInfo.getVolumeName(), ecBucketInfo.getBucketName()); + omMetrics = getMetrics("OMMetrics"); - assertCounter("EcBucketCreateTotal", 1L, omMetrics); + assertEquals(initialEcBucketCreateTotal + 1, getLongCounter("EcBucketCreateTotal", omMetrics)); bucketInfo = createBucketInfo(false); writeClient.createBucket(bucketInfo); @@ -244,7 +280,7 @@ public void testBucketOps() throws Exception { bucketInfo.getBucketName()); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBuckets", 2L, omMetrics); + assertEquals(initialNumBuckets + 2, getLongCounter("NumBuckets", omMetrics)); // inject exception to test for Failure Metrics on the read path doThrow(exception).when(mockBm).getBucketInfo(any(), any()); @@ -255,7 +291,7 @@ public void testBucketOps() throws Exception { ozoneManager, "bucketManager", mockBm); // inject exception to test for Failure Metrics on the write path - mockWritePathExceptions(OmBucketInfo.class); + OMMetadataManager metadataManager = mockWritePathExceptions(OmBucketInfo.class); doBucketOps(bucketInfo); ecBucketInfo = createBucketInfo(true); @@ -265,62 +301,81 @@ public void testBucketOps() throws Exception { //Expected failure } omMetrics = getMetrics("OMMetrics"); - assertCounter("EcBucketCreateFailsTotal", 1L, omMetrics); - - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBucketOps", 17L, omMetrics); - assertCounter("NumBucketCreates", 7L, omMetrics); - assertCounter("NumBucketUpdates", 2L, omMetrics); - assertCounter("NumBucketInfos", 2L, omMetrics); - assertCounter("NumBucketDeletes", 4L, omMetrics); - assertCounter("NumBucketLists", 2L, omMetrics); - - assertCounter("NumBucketCreateFails", 2L, omMetrics); - assertCounter("NumBucketUpdateFails", 1L, omMetrics); - assertCounter("NumBucketInfoFails", 1L, omMetrics); - assertCounter("NumBucketDeleteFails", 1L, omMetrics); - assertCounter("NumBucketListFails", 1L, omMetrics); - - assertCounter("NumBuckets", 2L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumBuckets", 2L, omMetrics); + assertEquals(initialEcBucketCreateFailsTotal + 1, getLongCounter("EcBucketCreateFailsTotal", omMetrics)); + assertEquals(initialNumBucketOps + 17, getLongCounter("NumBucketOps", omMetrics)); + assertEquals(initialNumBucketCreates + 7, getLongCounter("NumBucketCreates", omMetrics)); + assertEquals(initialNumBucketUpdates + 2, getLongCounter("NumBucketUpdates", omMetrics)); + assertEquals(initialNumBucketInfos + 2, getLongCounter("NumBucketInfos", omMetrics)); + assertEquals(initialNumBucketDeletes + 4, getLongCounter("NumBucketDeletes", omMetrics)); + assertEquals(initialNumBucketLists + 2, getLongCounter("NumBucketLists", omMetrics)); + + assertEquals(initialNumBucketCreateFails + 2, getLongCounter("NumBucketCreateFails", omMetrics)); + assertEquals(initialNumBucketUpdateFails + 1, getLongCounter("NumBucketUpdateFails", omMetrics)); + assertEquals(initialNumBucketInfoFails + 
1, getLongCounter("NumBucketInfoFails", omMetrics)); + assertEquals(initialNumBucketDeleteFails + 1, getLongCounter("NumBucketDeleteFails", omMetrics)); + assertEquals(initialNumBucketListFails + 1, getLongCounter("NumBucketListFails", omMetrics)); + assertEquals(initialNumBuckets + 2, getLongCounter("NumBuckets", omMetrics)); + + // restore state + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "bucketManager", bucketManager); + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "metadataManager", metadataManager); } @Test public void testKeyOps() throws Exception { - // This test needs a cluster with DNs and SCM to wait on safemode - clusterBuilder.setNumDatanodes(5); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, true); - startCluster(); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); KeyManager keyManager = (KeyManager) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); KeyManager mockKm = spy(keyManager); + + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumKeyOps = getLongCounter("NumKeyOps", omMetrics); + long initialNumKeyAllocate = getLongCounter("NumKeyAllocate", omMetrics); + long initialNumKeyLookup = getLongCounter("NumKeyLookup", omMetrics); + long initialNumKeyDeletes = getLongCounter("NumKeyDeletes", omMetrics); + long initialNumKeyLists = getLongCounter("NumKeyLists", omMetrics); + long initialNumTrashKeyLists = getLongCounter("NumTrashKeyLists", omMetrics); + long initialNumKeys = getLongCounter("NumKeys", omMetrics); + long initialNumInitiateMultipartUploads = getLongCounter("NumInitiateMultipartUploads", omMetrics); + + long initialEcKeyCreateTotal = getLongCounter("EcKeyCreateTotal", omMetrics); + long initialNumKeyAllocateFails = getLongCounter("NumKeyAllocateFails", omMetrics); + long initialNumKeyLookupFails = getLongCounter("NumKeyLookupFails", omMetrics); + long initialNumKeyDeleteFails = getLongCounter("NumKeyDeleteFails", omMetrics); + long initialNumTrashKeyListFails = getLongCounter("NumTrashKeyListFails", omMetrics); + long initialNumInitiateMultipartUploadFails = getLongCounter("NumInitiateMultipartUploadFails", omMetrics); + long initialNumBlockAllocationFails = getLongCounter("NumBlockAllocationFails", omMetrics); + long initialNumKeyListFails = getLongCounter("NumKeyListFails", omMetrics); + long initialEcKeyCreateFailsTotal = getLongCounter("EcKeyCreateFailsTotal", omMetrics); + // see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); doKeyOps(keyArgs); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeyOps", 8L, omMetrics); - assertCounter("NumKeyAllocate", 1L, omMetrics); - assertCounter("NumKeyLookup", 1L, omMetrics); - assertCounter("NumKeyDeletes", 1L, omMetrics); - assertCounter("NumKeyLists", 1L, omMetrics); - assertCounter("NumTrashKeyLists", 1L, omMetrics); - assertCounter("NumKeys", 0L, omMetrics); - assertCounter("NumInitiateMultipartUploads", 1L, omMetrics); - assertCounter("NumListOpenFiles", 1L, omMetrics); + omMetrics = getMetrics("OMMetrics"); + + assertEquals(initialNumKeyOps + 8, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyAllocate + 1, getLongCounter("NumKeyAllocate", omMetrics)); + 
assertEquals(initialNumKeyLookup + 1, getLongCounter("NumKeyLookup", omMetrics)); + assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyLists + 1, getLongCounter("NumKeyLists", omMetrics)); + assertEquals(initialNumTrashKeyLists + 1, getLongCounter("NumTrashKeyLists", omMetrics)); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumInitiateMultipartUploads + 1, getLongCounter("NumInitiateMultipartUploads", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); doKeyOps(keyArgs); + omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeyOps", 16L, omMetrics); - assertCounter("EcKeyCreateTotal", 1L, omMetrics); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialEcKeyCreateTotal + 1, getLongCounter("EcKeyCreateTotal", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); @@ -347,8 +402,8 @@ public void testKeyOps() throws Exception { } omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 2L, omMetrics); - assertCounter("NumBlockAllocationFails", 1L, omMetrics); + assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumBlockAllocationFails + 1, getLongCounter("NumBlockAllocationFails", omMetrics)); // inject exception to test for Failure Metrics on the read path doThrow(exception).when(mockKm).lookupKey(any(), any(), any()); @@ -365,27 +420,28 @@ public void testKeyOps() throws Exception { omMetadataReader, "keyManager", mockKm); // inject exception to test for Failure Metrics on the write path - mockWritePathExceptions(OmBucketInfo.class); + OMMetadataManager metadataManager = mockWritePathExceptions(OmBucketInfo.class); keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeyOps", 31L, omMetrics); - assertCounter("NumKeyAllocate", 6L, omMetrics); - assertCounter("NumKeyLookup", 3L, omMetrics); - assertCounter("NumKeyDeletes", 4L, omMetrics); - assertCounter("NumKeyLists", 3L, omMetrics); - assertCounter("NumTrashKeyLists", 3L, omMetrics); - assertCounter("NumInitiateMultipartUploads", 3L, omMetrics); - - assertCounter("NumKeyAllocateFails", 1L, omMetrics); - assertCounter("NumKeyLookupFails", 1L, omMetrics); - assertCounter("NumKeyDeleteFails", 1L, omMetrics); - assertCounter("NumKeyListFails", 1L, omMetrics); - assertCounter("NumTrashKeyListFails", 1L, omMetrics); - assertCounter("NumInitiateMultipartUploadFails", 1L, omMetrics); - assertCounter("NumKeys", 2L, omMetrics); + assertEquals(initialNumKeyOps + 31, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyAllocate + 6, getLongCounter("NumKeyAllocate", omMetrics)); + assertEquals(initialNumKeyLookup + 3, getLongCounter("NumKeyLookup", omMetrics)); + assertEquals(initialNumKeyDeletes + 4, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyLists + 3, getLongCounter("NumKeyLists", omMetrics)); + assertEquals(initialNumTrashKeyLists + 3, getLongCounter("NumTrashKeyLists", omMetrics)); + assertEquals(initialNumInitiateMultipartUploads + 3, getLongCounter("NumInitiateMultipartUploads", omMetrics)); + + assertEquals(initialNumKeyAllocateFails + 1, getLongCounter("NumKeyAllocateFails", omMetrics)); + assertEquals(initialNumKeyLookupFails + 1, 
getLongCounter("NumKeyLookupFails", omMetrics)); + assertEquals(initialNumKeyDeleteFails + 1, getLongCounter("NumKeyDeleteFails", omMetrics)); + assertEquals(initialNumKeyListFails + 1, getLongCounter("NumKeyListFails", omMetrics)); + assertEquals(initialNumTrashKeyListFails + 1, getLongCounter("NumTrashKeyListFails", omMetrics)); + assertEquals(initialNumInitiateMultipartUploadFails + 1, getLongCounter( + "NumInitiateMultipartUploadFails", omMetrics)); + assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); @@ -396,24 +452,24 @@ public void testKeyOps() throws Exception { //Expected Failure } omMetrics = getMetrics("OMMetrics"); - assertCounter("EcKeyCreateFailsTotal", 1L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumKeys", 2L, omMetrics); + assertEquals(initialEcKeyCreateFailsTotal + 1, getLongCounter("EcKeyCreateFailsTotal", omMetrics)); + // restore state + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "keyManager", keyManager); + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "metadataManager", metadataManager); } @ParameterizedTest @EnumSource(value = BucketLayout.class, names = {"FILE_SYSTEM_OPTIMIZED", "LEGACY"}) public void testDirectoryOps(BucketLayout bucketLayout) throws Exception { - clusterBuilder.setNumDatanodes(3); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, true); - // Speed up background directory deletion for this test. - conf.setTimeDuration(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); - conf.set(OzoneConfigKeys.OZONE_CLIENT_FS_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); - // For testing fs operations with legacy buckets. - conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - startCluster(); + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumKeys = getLongCounter("NumKeys", omMetrics); + long initialNumCreateDirectory = getLongCounter("NumCreateDirectory", omMetrics); + long initialNumKeyDeletes = getLongCounter("NumKeyDeletes", omMetrics); + long initialNumKeyRenames = getLongCounter("NumKeyRenames", omMetrics); // How long to wait for directory deleting service to clean up the files before aborting the test. final int timeoutMillis = @@ -424,13 +480,8 @@ public void testDirectoryOps(BucketLayout bucketLayout) throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - // Cluster should be empty. - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 0L, omMetrics); - assertCounter("NumCreateDirectory", 0L, omMetrics); - // These key operations include directory operations. - assertCounter("NumKeyDeletes", 0L, omMetrics); - assertCounter("NumKeyRenames", 0L, omMetrics); + // create bucket with different layout in each ParameterizedTest + TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout); // Create bucket with 2 nested directories. 
String rootPath = String.format("%s://%s/", @@ -443,72 +494,81 @@ public void testDirectoryOps(BucketLayout bucketLayout) throws Exception { assertEquals(bucketLayout, client.getObjectStore().getVolume(volumeName).getBucket(bucketName).getBucketLayout()); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 2L, omMetrics); + assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); // Only one directory create command is given, even though it created two directories. - assertCounter("NumCreateDirectory", 1L, omMetrics); - assertCounter("NumKeyDeletes", 0L, omMetrics); - assertCounter("NumKeyRenames", 0L, omMetrics); + assertEquals(initialNumCreateDirectory + 1, getLongCounter("NumCreateDirectory", omMetrics)); + assertEquals(initialNumKeyDeletes, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyRenames, getLongCounter("NumKeyRenames", omMetrics)); + // Add 2 files at different parts of the tree. ContractTestUtils.touch(fs, new Path(dirPath, "file1")); ContractTestUtils.touch(fs, new Path(dirPath.getParent(), "file2")); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 4L, omMetrics); - assertCounter("NumCreateDirectory", 1L, omMetrics); - assertCounter("NumKeyDeletes", 0L, omMetrics); - assertCounter("NumKeyRenames", 0L, omMetrics); + assertEquals(initialNumKeys + 4, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumCreateDirectory + 1, getLongCounter("NumCreateDirectory", omMetrics)); + assertEquals(initialNumKeyDeletes, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyRenames, getLongCounter("NumKeyRenames", omMetrics)); // Rename the child directory. fs.rename(dirPath, new Path(dirPath.getParent(), "new-name")); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 4L, omMetrics); - assertCounter("NumCreateDirectory", 1L, omMetrics); - assertCounter("NumKeyDeletes", 0L, omMetrics); + assertEquals(initialNumKeys + 4, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumCreateDirectory + 1, getLongCounter("NumCreateDirectory", omMetrics)); + assertEquals(initialNumKeyDeletes, getLongCounter("NumKeyDeletes", omMetrics)); long expectedRenames = 1; if (bucketLayout == BucketLayout.LEGACY) { // Legacy bucket must rename keys individually. expectedRenames = 2; } - assertCounter("NumKeyRenames", expectedRenames, omMetrics); + assertEquals(initialNumKeyRenames + expectedRenames, getLongCounter("NumKeyRenames", omMetrics)); // Delete metric should be decremented by directory deleting service in the background. fs.delete(dirPath.getParent(), true); GenericTestUtils.waitFor(() -> { - long keyCount = MetricsAsserts.getLongCounter("NumKeys", getMetrics("OMMetrics")); + long keyCount = getLongCounter("NumKeys", getMetrics("OMMetrics")); return keyCount == 0; }, timeoutMillis / 5, timeoutMillis); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 0L, omMetrics); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); // This is the number of times the create directory command was given, not the current number of directories. - assertCounter("NumCreateDirectory", 1L, omMetrics); + assertEquals(initialNumCreateDirectory + 1, getLongCounter("NumCreateDirectory", omMetrics)); // Directory delete counts as key delete. One command was given so the metric is incremented once. 
- assertCounter("NumKeyDeletes", 1L, omMetrics); - assertCounter("NumKeyRenames", expectedRenames, omMetrics); + assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyRenames + expectedRenames, getLongCounter("NumKeyRenames", omMetrics)); // Re-create the same tree as before, but this time delete the bucket recursively. // All metrics should still be properly updated. fs.mkdirs(dirPath); ContractTestUtils.touch(fs, new Path(dirPath, "file1")); ContractTestUtils.touch(fs, new Path(dirPath.getParent(), "file2")); - assertCounter("NumKeys", 4L, getMetrics("OMMetrics")); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); fs.delete(bucketPath, true); GenericTestUtils.waitFor(() -> { - long keyCount = MetricsAsserts.getLongCounter("NumKeys", getMetrics("OMMetrics")); + long keyCount = getLongCounter("NumKeys", getMetrics("OMMetrics")); return keyCount == 0; }, timeoutMillis / 5, timeoutMillis); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 0L, omMetrics); - assertCounter("NumCreateDirectory", 2L, omMetrics); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumCreateDirectory + 2, getLongCounter("NumCreateDirectory", omMetrics)); // One more keys delete request is given as part of the bucket delete to do a batch delete of its keys. - assertCounter("NumKeyDeletes", 2L, omMetrics); - assertCounter("NumKeyRenames", expectedRenames, omMetrics); + assertEquals(initialNumKeyDeletes + 2, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyRenames + expectedRenames, getLongCounter("NumKeyRenames", omMetrics)); } @Test public void testSnapshotOps() throws Exception { // This tests needs enough dataNodes to allocate the blocks for the keys. 
- clusterBuilder.setNumDatanodes(3); - startCluster(); + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumSnapshotCreateFails = getLongCounter("NumSnapshotCreateFails", omMetrics); + long initialNumSnapshotCreates = getLongCounter("NumSnapshotCreates", omMetrics); + long initialNumSnapshotListFails = getLongCounter("NumSnapshotListFails", omMetrics); + long initialNumSnapshotLists = getLongCounter("NumSnapshotLists", omMetrics); + long initialNumSnapshotActive = getLongCounter("NumSnapshotActive", omMetrics); + long initialNumSnapshotDeleted = getLongCounter("NumSnapshotDeleted", omMetrics); + long initialNumSnapshotDiffJobs = getLongCounter("NumSnapshotDiffJobs", omMetrics); + long initialNumSnapshotDiffJobFails = getLongCounter("NumSnapshotDiffJobFails", omMetrics); OmBucketInfo omBucketInfo = createBucketInfo(false); @@ -528,16 +588,15 @@ public void testSnapshotOps() throws Exception { // Create first snapshot writeClient.createSnapshot(volumeName, bucketName, snapshot1); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - - assertCounter("NumSnapshotCreateFails", 0L, omMetrics); - assertCounter("NumSnapshotCreates", 1L, omMetrics); - assertCounter("NumSnapshotListFails", 0L, omMetrics); - assertCounter("NumSnapshotLists", 0L, omMetrics); - assertCounter("NumSnapshotActive", 1L, omMetrics); - assertCounter("NumSnapshotDeleted", 0L, omMetrics); - assertCounter("NumSnapshotDiffJobs", 0L, omMetrics); - assertCounter("NumSnapshotDiffJobFails", 0L, omMetrics); + omMetrics = getMetrics("OMMetrics"); + assertEquals(initialNumSnapshotCreateFails, getLongCounter("NumSnapshotCreateFails", omMetrics)); + assertEquals(initialNumSnapshotCreates + 1, getLongCounter("NumSnapshotCreates", omMetrics)); + assertEquals(initialNumSnapshotListFails, getLongCounter("NumSnapshotListFails", omMetrics)); + assertEquals(initialNumSnapshotLists, getLongCounter("NumSnapshotLists", omMetrics)); + assertEquals(initialNumSnapshotActive + 1, getLongCounter("NumSnapshotActive", omMetrics)); + assertEquals(initialNumSnapshotDeleted, getLongCounter("NumSnapshotDeleted", omMetrics)); + assertEquals(initialNumSnapshotDiffJobs, getLongCounter("NumSnapshotDiffJobs", omMetrics)); + assertEquals(initialNumSnapshotDiffJobFails, getLongCounter("NumSnapshotDiffJobFails", omMetrics)); // Create second key OmKeyArgs keyArgs2 = createKeyArgs(volumeName, bucketName, @@ -560,35 +619,28 @@ public void testSnapshotOps() throws Exception { } } omMetrics = getMetrics("OMMetrics"); - assertCounter("NumSnapshotDiffJobs", 1L, omMetrics); - assertCounter("NumSnapshotDiffJobFails", 0L, omMetrics); + assertEquals(initialNumSnapshotDiffJobs + 1, getLongCounter("NumSnapshotDiffJobs", omMetrics)); + assertEquals(initialNumSnapshotDiffJobFails, getLongCounter("NumSnapshotDiffJobFails", omMetrics)); // List snapshots writeClient.listSnapshot( volumeName, bucketName, null, null, Integer.MAX_VALUE); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumSnapshotActive", 2L, omMetrics); - assertCounter("NumSnapshotCreates", 2L, omMetrics); - assertCounter("NumSnapshotLists", 1L, omMetrics); - assertCounter("NumSnapshotListFails", 0L, omMetrics); + assertEquals(initialNumSnapshotActive + 2, getLongCounter("NumSnapshotActive", omMetrics)); + assertEquals(initialNumSnapshotCreates + 2, getLongCounter("NumSnapshotCreates", omMetrics)); + assertEquals(initialNumSnapshotListFails, getLongCounter("NumSnapshotListFails", omMetrics)); + assertEquals(initialNumSnapshotLists + 1, 
getLongCounter("NumSnapshotLists", omMetrics)); // List snapshot: invalid bucket case. assertThrows(OMException.class, () -> writeClient.listSnapshot(volumeName, "invalidBucket", null, null, Integer.MAX_VALUE)); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumSnapshotLists", 2L, omMetrics); - assertCounter("NumSnapshotListFails", 1L, omMetrics); - - // restart OM - cluster.restartOzoneManager(); - - // Check number of active snapshots in the snapshot table - // is the same after OM restart - assertCounter("NumSnapshotActive", 2L, omMetrics); + assertEquals(initialNumSnapshotLists + 2, getLongCounter("NumSnapshotLists", omMetrics)); + assertEquals(initialNumSnapshotListFails + 1, getLongCounter("NumSnapshotListFails", omMetrics)); } - private void mockWritePathExceptions(Classklass) throws Exception { + private OMMetadataManager mockWritePathExceptions(Classklass) throws Exception { String tableName; if (klass == OmBucketInfo.class) { tableName = "bucketTable"; @@ -610,71 +662,63 @@ private void mockWritePathExceptions(Classklass) throws Exception { } HddsWhiteboxTestUtils.setInternalState( ozoneManager, "metadataManager", mockMm); + + // Return the original metadataManager so it can be restored later + return metadataManager; } @Test public void testAclOperations() throws Exception { - startCluster(); - try { - // Create a volume. - client.getObjectStore().createVolume("volumeacl"); - - OzoneObj volObj = new OzoneObjInfo.Builder().setVolumeName("volumeacl") - .setResType(VOLUME).setStoreType(OZONE).build(); - - // Test getAcl - List acls = ozoneManager.getAcl(volObj); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumGetAcl", 1L, omMetrics); - - // Test addAcl - writeClient.addAcl(volObj, - new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "ozoneuser", - IAccessAuthorizer.ACLType.ALL, ACCESS)); - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumAddAcl", 1L, omMetrics); - - // Test setAcl - writeClient.setAcl(volObj, acls); - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumSetAcl", 1L, omMetrics); - - // Test removeAcl - writeClient.removeAcl(volObj, acls.get(0)); - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumRemoveAcl", 1L, omMetrics); - - } finally { - client.getObjectStore().deleteVolume("volumeacl"); - } + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumGetAcl = getLongCounter("NumGetAcl", omMetrics); + long initialNumAddAcl = getLongCounter("NumAddAcl", omMetrics); + long initialNumSetAcl = getLongCounter("NumSetAcl", omMetrics); + long initialNumRemoveAcl = getLongCounter("NumRemoveAcl", omMetrics); + // Create a volume. 
+ client.getObjectStore().createVolume("volumeacl"); + + OzoneObj volObj = new OzoneObjInfo.Builder().setVolumeName("volumeacl") + .setResType(VOLUME).setStoreType(OZONE).build(); + + // Test getAcl, addAcl, setAcl, removeAcl + List acls = ozoneManager.getAcl(volObj); + writeClient.addAcl(volObj, + new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "ozoneuser", + ACCESS, IAccessAuthorizer.ACLType.ALL)); + writeClient.setAcl(volObj, acls); + writeClient.removeAcl(volObj, acls.get(0)); + + omMetrics = getMetrics("OMMetrics"); + assertEquals(initialNumGetAcl + 1, getLongCounter("NumGetAcl", omMetrics)); + assertEquals(initialNumAddAcl + 1, getLongCounter("NumAddAcl", omMetrics)); + assertEquals(initialNumSetAcl + 1, getLongCounter("NumSetAcl", omMetrics)); + assertEquals(initialNumRemoveAcl + 1, getLongCounter("NumRemoveAcl", omMetrics)); + + client.getObjectStore().deleteVolume("volumeacl"); } @Test public void testAclOperationsHA() throws Exception { - // This test needs a cluster with DNs and SCM to wait on safemode - clusterBuilder.setNumDatanodes(3); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, true); - startCluster(); - ObjectStore objectStore = client.getObjectStore(); // Create a volume. - objectStore.createVolume("volumeacl"); + objectStore.createVolume("volumeaclha"); // Create a bucket. - objectStore.getVolume("volumeacl").createBucket("bucketacl"); + objectStore.getVolume("volumeaclha").createBucket("bucketaclha"); // Create a key. - objectStore.getVolume("volumeacl").getBucket("bucketacl") - .createKey("keyacl", 0).close(); + objectStore.getVolume("volumeaclha").getBucket("bucketaclha") + .createKey("keyaclha", 0).close(); OzoneObj volObj = - new OzoneObjInfo.Builder().setVolumeName("volumeacl").setResType(VOLUME) + new OzoneObjInfo.Builder().setVolumeName("volumeaclha").setResType(VOLUME) .setStoreType(OZONE).build(); - OzoneObj buckObj = new OzoneObjInfo.Builder().setVolumeName("volumeacl") - .setBucketName("bucketacl").setResType(BUCKET).setStoreType(OZONE) + OzoneObj buckObj = new OzoneObjInfo.Builder().setVolumeName("volumeaclha") + .setBucketName("bucketaclha").setResType(BUCKET).setStoreType(OZONE) .build(); - OzoneObj keyObj = new OzoneObjInfo.Builder().setVolumeName("volumeacl") - .setBucketName("bucketacl").setResType(BUCKET).setKeyName("keyacl") + OzoneObj keyObj = new OzoneObjInfo.Builder().setVolumeName("volumeaclha") + .setBucketName("bucketaclha").setResType(BUCKET).setKeyName("keyaclha") .setStoreType(OZONE).build(); List acls = ozoneManager.getAcl(volObj); @@ -696,7 +740,7 @@ private void testAclMetricsInternal(ObjectStore objectStore, OzoneObj volObj, long initialValue = metrics.getNumAddAcl(); objectStore.addAcl(volObj, new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "ozoneuser", - IAccessAuthorizer.ACLType.ALL, ACCESS)); + ACCESS, IAccessAuthorizer.ACLType.ALL)); assertEquals(initialValue + 1, metrics.getNumAddAcl()); @@ -795,13 +839,13 @@ private void doKeyOps(OmKeyArgs keyArgs) { try { ozoneManager.listKeys(keyArgs.getVolumeName(), - keyArgs.getBucketName(), null, null, 0); + keyArgs.getBucketName(), null, null, 0); } catch (IOException ignored) { } try { ozoneManager.listTrash(keyArgs.getVolumeName(), - keyArgs.getBucketName(), null, null, 0); + keyArgs.getBucketName(), null, null, 0); } catch (IOException ignored) { } @@ -837,6 +881,7 @@ private OmKeyArgs createKeyArgs(String volumeName, String bucketName, .setKeyName(keyName) .setAcls(Lists.emptyList()) .setReplicationConfig(repConfig) + 
.setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 454019b4a8a4..ba0dabf47dd5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; @@ -47,6 +48,7 @@ import java.io.IOException; import java.net.ConnectException; import java.time.Duration; +import java.util.Collections; import java.util.Iterator; import java.util.UUID; import java.util.HashMap; @@ -61,6 +63,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; import static org.assertj.core.api.Assertions.assertThat; @@ -76,7 +79,6 @@ public abstract class TestOzoneManagerHA { private static MiniOzoneHAClusterImpl cluster = null; - private static MiniOzoneCluster.Builder clusterBuilder = null; private static ObjectStore objectStore; private static OzoneConfiguration conf; private static String omServiceId; @@ -106,10 +108,6 @@ public OzoneConfiguration getConf() { return conf; } - public MiniOzoneCluster.Builder getClusterBuilder() { - return clusterBuilder; - } - public String getOmServiceId() { return omServiceId; } @@ -177,11 +175,11 @@ public static void init() throws Exception { conf.set(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, "10s"); conf.set(OZONE_KEY_DELETING_LIMIT_PER_TASK, "2"); - clusterBuilder = MiniOzoneCluster.newOMHABuilder(conf) + MiniOzoneHAClusterImpl.Builder clusterBuilder = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs); - cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); + cluster = clusterBuilder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(omServiceId, conf); objectStore = client.getObjectStore(); @@ -217,6 +215,14 @@ public static void createKey(OzoneBucket ozoneBucket, String keyName) throws IOE ozoneOutputStream.close(); } + public static String createPrefixName() { + return "prefix" + RandomStringUtils.randomNumeric(5) + OZONE_URI_DELIMITER; + } + + public static void createPrefix(OzoneObj prefixObj) throws IOException { + assertTrue(objectStore.setAcl(prefixObj, Collections.emptyList())); + } + protected OzoneBucket setupBucket() throws Exception { String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java index fbf80a8a879f..716c1003d264 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java @@ -59,7 +59,6 @@ import java.net.InetSocketAddress; import java.time.Instant; import java.util.ArrayList; -import java.util.BitSet; import java.util.Collections; import java.util.List; @@ -70,6 +69,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_DELETE; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; @@ -561,7 +561,7 @@ void testAddBucketAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildBucketObj(ozoneBucket); @@ -573,7 +573,7 @@ void testRemoveBucketAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildBucketObj(ozoneBucket); @@ -586,7 +586,7 @@ void testSetBucketAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildBucketObj(ozoneBucket); @@ -608,9 +608,7 @@ private boolean compareAcls(OzoneAcl givenAcl, OzoneAcl existingAcl) { if (givenAcl.getType().equals(existingAcl.getType()) && givenAcl.getName().equals(existingAcl.getName()) && givenAcl.getAclScope().equals(existingAcl.getAclScope())) { - BitSet bitSet = (BitSet) givenAcl.getAclBitSet().clone(); - bitSet.and(existingAcl.getAclBitSet()); - return bitSet.equals(existingAcl.getAclBitSet()); + return givenAcl.equals(existingAcl); } return false; } @@ -620,7 +618,7 @@ void testAddKeyAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); String key = createKey(ozoneBucket); @@ -634,7 +632,7 @@ void testRemoveKeyAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); String key = createKey(ozoneBucket); @@ -649,7 +647,7 @@ void testSetKeyAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); String key = createKey(ozoneBucket); @@ -665,7 +663,7 @@ void testAddPrefixAcl() throws Exception { String remoteUserName = "remoteUser"; String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl defaultUserAcl = new 
OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildPrefixObj(ozoneBucket, prefixName); @@ -678,9 +676,9 @@ void testRemovePrefixAcl() throws Exception { String remoteUserName = "remoteUser"; String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, ACCESS); + ACCESS, READ); OzoneAcl userAcl1 = new OzoneAcl(USER, "remote", - READ, ACCESS); + ACCESS, READ); OzoneObj ozoneObj = buildPrefixObj(ozoneBucket, prefixName); @@ -710,7 +708,7 @@ void testSetPrefixAcl() throws Exception { String remoteUserName = "remoteUser"; String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildPrefixObj(ozoneBucket, prefixName); @@ -726,13 +724,13 @@ void testLinkBucketAddBucketAcl() throws Exception { OzoneObj srcObj = buildBucketObj(srcBucket); // Add ACL to the LINK and verify that it is added to the source bucket - OzoneAcl acl1 = new OzoneAcl(USER, "remoteUser1", READ, DEFAULT); + OzoneAcl acl1 = new OzoneAcl(USER, "remoteUser1", DEFAULT, READ); boolean addAcl = getObjectStore().addAcl(linkObj, acl1); assertTrue(addAcl); assertEqualsAcls(srcObj, linkObj); // Add ACL to the SOURCE and verify that it from link - OzoneAcl acl2 = new OzoneAcl(USER, "remoteUser2", WRITE, DEFAULT); + OzoneAcl acl2 = new OzoneAcl(USER, "remoteUser2", DEFAULT, WRITE); boolean addAcl2 = getObjectStore().addAcl(srcObj, acl2); assertTrue(addAcl2); assertEqualsAcls(srcObj, linkObj); @@ -779,14 +777,14 @@ void testLinkBucketSetBucketAcl() throws Exception { // Set ACL to the LINK and verify that it is set to the source bucket List acl1 = Collections.singletonList( - new OzoneAcl(USER, "remoteUser1", READ, DEFAULT)); + new OzoneAcl(USER, "remoteUser1", DEFAULT, READ)); boolean setAcl1 = getObjectStore().setAcl(linkObj, acl1); assertTrue(setAcl1); assertEqualsAcls(srcObj, linkObj); // Set ACL to the SOURCE and verify that it from link List acl2 = Collections.singletonList( - new OzoneAcl(USER, "remoteUser2", WRITE, DEFAULT)); + new OzoneAcl(USER, "remoteUser2", DEFAULT, WRITE)); boolean setAcl2 = getObjectStore().setAcl(srcObj, acl2); assertTrue(setAcl2); assertEqualsAcls(srcObj, linkObj); @@ -802,12 +800,12 @@ void testLinkBucketAddKeyAcl() throws Exception { OzoneObj srcObj = buildKeyObj(srcBucket, key); String user1 = "remoteUser1"; - OzoneAcl acl1 = new OzoneAcl(USER, user1, READ, DEFAULT); + OzoneAcl acl1 = new OzoneAcl(USER, user1, DEFAULT, READ); testAddAcl(user1, linkObj, acl1); // case1: set link acl assertEqualsAcls(srcObj, linkObj); String user2 = "remoteUser2"; - OzoneAcl acl2 = new OzoneAcl(USER, user2, READ, DEFAULT); + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); testAddAcl(user2, srcObj, acl2); // case2: set src acl assertEqualsAcls(srcObj, linkObj); @@ -823,7 +821,7 @@ void testLinkBucketRemoveKeyAcl() throws Exception { OzoneObj linkObj = buildKeyObj(linkedBucket, key); OzoneObj srcObj = buildKeyObj(srcBucket, key); String user = "remoteUser1"; - OzoneAcl acl = new OzoneAcl(USER, user, READ, DEFAULT); + OzoneAcl acl = new OzoneAcl(USER, user, DEFAULT, READ); testRemoveAcl(user, linkObj, acl); assertEqualsAcls(srcObj, linkObj); @@ -834,7 +832,7 @@ void testLinkBucketRemoveKeyAcl() throws Exception { OzoneObj linkObj2 = buildKeyObj(linkedBucket2, key2); OzoneObj srcObj2 = buildKeyObj(srcBucket2, key2); String user2 = "remoteUser2"; - OzoneAcl acl2 = new OzoneAcl(USER, user2, 
READ, DEFAULT); + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); testRemoveAcl(user2, srcObj2, acl2); assertEqualsAcls(srcObj2, linkObj2); @@ -849,12 +847,85 @@ void testLinkBucketSetKeyAcl() throws Exception { OzoneObj srcObj = buildKeyObj(srcBucket, key); String user1 = "remoteUser1"; - OzoneAcl acl1 = new OzoneAcl(USER, user1, READ, DEFAULT); + OzoneAcl acl1 = new OzoneAcl(USER, user1, DEFAULT, READ); testSetAcl(user1, linkObj, acl1); // case1: set link acl assertEqualsAcls(srcObj, linkObj); String user2 = "remoteUser2"; - OzoneAcl acl2 = new OzoneAcl(USER, user2, READ, DEFAULT); + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); + testSetAcl(user2, srcObj, acl2); // case2: set src acl + assertEqualsAcls(srcObj, linkObj); + + } + + @Test + void testLinkBucketAddPrefixAcl() throws Exception { + OzoneBucket srcBucket = setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user1 = "remoteUser1"; + OzoneAcl acl1 = new OzoneAcl(USER, user1, DEFAULT, READ); + testAddAcl(user1, linkObj, acl1); // case1: set link acl + assertEqualsAcls(srcObj, linkObj); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); + testAddAcl(user2, srcObj, acl2); // case2: set src acl + assertEqualsAcls(srcObj, linkObj); + + } + + @Test + void testLinkBucketRemovePrefixAcl() throws Exception { + + // CASE 1: from link bucket + OzoneBucket srcBucket = setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user = "remoteUser1"; + OzoneAcl acl = new OzoneAcl(USER, user, DEFAULT, READ); + testRemoveAcl(user, linkObj, acl); + assertEqualsAcls(srcObj, linkObj); + + // CASE 2: from src bucket + OzoneBucket srcBucket2 = setupBucket(); + OzoneBucket linkedBucket2 = linkBucket(srcBucket2); + String prefix2 = createPrefixName(); + OzoneObj linkObj2 = buildPrefixObj(linkedBucket2, prefix2); + OzoneObj srcObj2 = buildPrefixObj(srcBucket2, prefix2); + createPrefix(srcObj2); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); + testRemoveAcl(user2, srcObj2, acl2); + assertEqualsAcls(srcObj2, linkObj2); + + } + + @Test + void testLinkBucketSetPrefixAcl() throws Exception { + OzoneBucket srcBucket = setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user1 = "remoteUser1"; + OzoneAcl acl1 = new OzoneAcl(USER, user1, DEFAULT, READ); + testSetAcl(user1, linkObj, acl1); // case1: set link acl + assertEqualsAcls(srcObj, linkObj); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); testSetAcl(user2, srcObj, acl2); // case2: set src acl assertEqualsAcls(srcObj, linkObj); @@ -927,7 +998,7 @@ private void testSetAcl(String remoteUserName, OzoneObj ozoneObj, } OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); + DEFAULT, WRITE); List newAcls = Collections.singletonList(modifiedUserAcl); boolean setAcl = objectStore.setAcl(ozoneObj, newAcls); @@ -960,7 +1031,7 @@ private void 
testAddAcl(String remoteUserName, OzoneObj ozoneObj, // Add an acl by changing acl type with same type, name and scope. userAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); + DEFAULT, WRITE); addAcl = objectStore.addAcl(ozoneObj, userAcl); assertTrue(addAcl); } @@ -981,7 +1052,7 @@ private void testAddLinkAcl(String remoteUserName, OzoneObj ozoneObj, // Add an acl by changing acl type with same type, name and scope. userAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); + DEFAULT, WRITE); addAcl = objectStore.addAcl(ozoneObj, userAcl); assertTrue(addAcl); } @@ -990,8 +1061,16 @@ private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, OzoneAcl userAcl) throws Exception { ObjectStore objectStore = getObjectStore(); - // As by default create will add some default acls in RpcClient. - List acls = objectStore.getAcl(ozoneObj); + // Other than prefix, by default create will add some default acls in RpcClient. + List acls; + if (ozoneObj.getResourceType().equals(OzoneObj.ResourceType.PREFIX)) { + objectStore.addAcl(ozoneObj, userAcl); + // Add another arbitrary group ACL since the prefix will be removed when removing + // the last ACL for the prefix and PREFIX_NOT_FOUND will be thrown + OzoneAcl groupAcl = new OzoneAcl(GROUP, "arbitrary-group", ACCESS, READ); + objectStore.addAcl(ozoneObj, groupAcl); + } + acls = objectStore.getAcl(ozoneObj); assertTrue(acls.size() > 0); @@ -1008,7 +1087,7 @@ private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, // Just changed acl type here to write, rest all is same as defaultUserAcl. OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); + DEFAULT, WRITE); addAcl = objectStore.addAcl(ozoneObj, modifiedUserAcl); assertTrue(addAcl); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index ab9f6382f0e1..2d34f5fc403e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -24,6 +25,7 @@ import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -187,11 +189,12 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), 1, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); OmMultipartUploadCompleteInfo 
omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -362,7 +365,7 @@ private void validateListParts(OzoneBucket ozoneBucket, String keyName, for (int i = 0; i < partsMap.size(); i++) { assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()), - partInfoList.get(i).getPartName()); + partInfoList.get(i).getETag()); } @@ -379,9 +382,10 @@ private String createMultipartUploadPartKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), partNumber, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); - return ozoneOutputStream.getCommitUploadPartInfo().getPartName(); + return ozoneOutputStream.getCommitUploadPartInfo().getETag(); } @Test @@ -470,7 +474,7 @@ void testIncrementalWaitTimeWithSameNodeFailover() throws Exception { String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId(); getCluster().stopOzoneManager(leaderOMNodeId); - Thread.sleep(NODE_FAILURE_TIMEOUT * 4); + getCluster().waitForLeaderOM(); createKeyTest(true); // failover should happen to new node long numTimesTriedToSameNode = omFailoverProxyProvider.getWaitTime() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java index 41f1c14f3727..72f1c3374b28 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java @@ -46,6 +46,7 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.ozone.OzoneAcl; @@ -197,6 +198,8 @@ private void setupEnvironment(boolean aclEnabled, OzoneManager.setTestSecureOmFlag(true); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient( + new ScmBlockLocationTestingClient(null, null, 0))); om.setCertClient(new CertificateClientTestImpl(conf)); om.start(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 14b1a30b44f1..cc0e1feaa548 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.SafeMode; +import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -35,13 +41,12 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.HddsDatanodeService; import 
org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.scm.TestStorageContainerManagerHelper; +import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.AfterEach; @@ -54,12 +59,17 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; -import java.util.Map; +import java.util.concurrent.TimeoutException; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -97,9 +107,9 @@ public void init() throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_STALENODE_INTERVAL, "10s"); conf.set(OZONE_SCM_DEADNODE_INTERVAL, "25s"); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, MILLISECONDS); builder = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(500) .setStartDataNodes(false); cluster = builder.build(); cluster.startHddsDatanodes(); @@ -127,10 +137,7 @@ public void shutdown() { @Test void testSafeModeOperations() throws Exception { - // Create {numKeys} random names keys. - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(100, 4096); + TestDataUtil.createKeys(cluster, 100); final List containers = cluster .getStorageContainerManager().getContainerManager().getContainers(); GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000); @@ -216,10 +223,7 @@ void testSCMSafeMode() throws Exception { assertFalse(cluster.getStorageContainerManager().isInSafeMode()); // Test2: Test safe mode when containers are there in system. - // Create {numKeys} random names keys. 
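// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): in this file the removed
// TestStorageContainerManagerHelper-based key creation, here and in the hunks
// below, is replaced by the single TestDataUtil call added by the patch, e.g.:
TestDataUtil.createKeys(cluster, 100);  // was: new TestStorageContainerManagerHelper(cluster, conf).createKeys(100, 4096)
// ---------------------------------------------------------------------------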
- TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(100 * 2, 4096); + TestDataUtil.createKeys(cluster, 100 * 2); final List containers = cluster .getStorageContainerManager().getContainerManager().getContainers(); GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000 * 30); @@ -291,9 +295,7 @@ public void testSCMSafeModeRestrictedOp() throws Exception { cluster.waitTobeOutOfSafeMode(); assertFalse(scm.isInSafeMode()); - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - helper.createKeys(10, 4096); + TestDataUtil.createKeys(cluster, 10); SCMClientProtocolServer clientProtocolServer = cluster .getStorageContainerManager().getClientProtocolServer(); assertFalse((scm.getClientProtocolServer()).getSafeModeStatus()); @@ -323,8 +325,6 @@ public void testSCMSafeModeDisabled() throws Exception { conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, 3); builder = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(500) .setNumDatanodes(3); cluster = builder.build(); StorageContainerManager scm = cluster.getStorageContainerManager(); @@ -334,4 +334,44 @@ public void testSCMSafeModeDisabled() throws Exception { cluster.restartStorageContainerManager(true); assertFalse(scm.isInSafeMode()); } + + @Test + public void testCreateRetryWhileSCMSafeMode() throws Exception { + // Test1: Test safe mode when there are no containers in system. + cluster.stop(); + cluster = builder.build(); + + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + OMMetrics omMetrics = cluster.getOzoneManager().getMetrics(); + long allocateBlockReqCount = omMetrics.getNumBlockAllocateFails(); + + try (FileSystem fs = FileSystem.get(conf)) { + assertTrue(((SafeMode)fs).setSafeMode(SafeModeAction.GET)); + + Thread t = new Thread(() -> { + try { + LOG.info("Wait for allocate block fails at least once"); + GenericTestUtils.waitFor(() -> omMetrics.getNumBlockAllocateFails() > allocateBlockReqCount, + 100, 10000); + + cluster.startHddsDatanodes(); + cluster.waitForClusterToBeReady(); + cluster.waitTobeOutOfSafeMode(); + } catch (InterruptedException | TimeoutException e) { + throw new RuntimeException(e); + } + }); + t.start(); + + final Path file = new Path("file"); + try (FSDataOutputStream outputStream = fs.create(file, true)) { + LOG.info("Successfully created a file"); + } + t.join(); + } + + assertFalse(cluster.getStorageContainerManager().isInSafeMode()); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/RangerUserRequest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/RangerUserRequest.java index 07bb3cf96270..cde89599fbea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/RangerUserRequest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/RangerUserRequest.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.ozone.om.multitenant; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; -import com.google.gson.JsonParseException; -import com.google.gson.JsonParser; +import com.fasterxml.jackson.core.JsonParseException; +import 
com.fasterxml.jackson.databind.JsonNode; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.kerby.util.Base64; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -191,7 +190,7 @@ private String getResponseData(HttpURLConnection urlConnection) } private HttpURLConnection makeHttpGetCall(String urlString, - String method, boolean isSpnego) throws IOException { + String method, boolean isSpnego) throws IOException { URL url = new URL(urlString); final HttpURLConnection urlConnection = openURLConnection(url); @@ -215,14 +214,16 @@ public String getUserId(String userPrincipal) throws IOException { String response = getResponseData(conn); String userIDCreated = null; try { - JsonObject jResonse = JsonParser.parseString(response).getAsJsonObject(); - JsonArray userinfo = jResonse.get("vXUsers").getAsJsonArray(); + JsonNode jResponse = + JsonUtils.readTree(response); + JsonNode userinfo = jResponse.path("vXUsers"); int numIndex = userinfo.size(); + for (int i = 0; i < numIndex; ++i) { - if (userinfo.get(i).getAsJsonObject().get("name").getAsString() - .equals(userPrincipal)) { - userIDCreated = - userinfo.get(i).getAsJsonObject().get("id").getAsString(); + JsonNode userNode = userinfo.get(i); + String name = userNode.path("name").asText(); + if (name.equals(userPrincipal)) { + userIDCreated = userNode.path("id").asText(); break; } } @@ -231,6 +232,7 @@ public String getUserId(String userPrincipal) throws IOException { e.printStackTrace(); throw e; } + return userIDCreated; } @@ -253,8 +255,8 @@ public String createUser(String userName, String password) String userId; try { assert userInfo != null; - JsonObject jObject = JsonParser.parseString(userInfo).getAsJsonObject(); - userId = jObject.get("id").getAsString(); + JsonNode jNode = JsonUtils.readTree(userInfo); + userId = jNode.get("id").asText(); LOG.debug("Ranger returned userId: {}", userId); } catch (JsonParseException e) { e.printStackTrace(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java index 1cb436dcb38d..078266581cbc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java @@ -49,6 +49,7 @@ import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MULTITENANCY_ENABLED; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -76,9 +77,9 @@ public static void initClusterProvider() throws Exception { conf.setBoolean( OMMultiTenantManagerImpl.OZONE_OM_TENANT_DEV_SKIP_RANGER, true); conf.setBoolean(OZONE_OM_MULTITENANCY_ENABLED, true); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.INITIAL_VERSION.layoutVersion()); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newBuilder(conf) - .withoutDatanodes() - .setOmLayoutVersion(OMLayoutFeature.INITIAL_VERSION.layoutVersion()); + .withoutDatanodes(); cluster = builder.build(); client 
= cluster.newClient(); s3VolumeName = HddsClientUtils.getDefaultS3VolumeName(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 029b0813bb55..c123675565aa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -24,11 +24,11 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -102,6 +102,7 @@ import java.util.Iterator; import java.util.Set; import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; @@ -111,6 +112,7 @@ import static org.apache.commons.lang3.StringUtils.leftPad; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -118,7 +120,9 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; @@ -171,6 +175,7 @@ public abstract class TestOmSnapshot { private ObjectStore store; private OzoneManager ozoneManager; private OzoneBucket ozoneBucket; + private OzoneConfiguration conf; private final BucketLayout bucketLayout; private final boolean enabledFileSystemPaths; @@ -195,7 +200,7 @@ public TestOmSnapshot(BucketLayout newBucketLayout, * Create a MiniDFSCluster for testing. 
*/ private void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + conf = new OzoneConfiguration(); conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); @@ -207,10 +212,11 @@ private void init() throws Exception { conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumOfOzoneManagers(3) - .setOmLayoutVersion(OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()) .build(); cluster.waitForClusterToBeReady(); @@ -236,6 +242,12 @@ private void stopKeyManager() throws IOException { keyManager.stop(); } + private void startKeyManager() throws IOException { + KeyManagerImpl keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils + .getInternalState(ozoneManager, "keyManager"); + keyManager.start(conf); + } + private RDBStore getRdbStore() { return (RDBStore) ozoneManager.getMetadataManager().getStore(); } @@ -1078,7 +1090,7 @@ public void testSnapdiffWithObjectMetaModification() throws Exception { createSnapshot(testVolumeName, testBucketName, snap1); OzoneObj keyObj = buildKeyObj(bucket, key1); OzoneAcl userAcl = new OzoneAcl(USER, "user", - WRITE, DEFAULT); + DEFAULT, WRITE); store.addAcl(keyObj, userAcl); String snap2 = "snap2"; @@ -1937,13 +1949,13 @@ public void testSnapshotQuotaHandling() throws Exception { private List getKeyTableSstFiles() throws IOException { if (!bucketLayout.isFileSystemOptimized()) { - return getRdbStore().getDb().getSstFileList().stream().filter( - x -> new String(x.columnFamilyName(), UTF_8).equals( - OmMetadataManagerImpl.KEY_TABLE)).collect(Collectors.toList()); + return getRdbStore().getDb().getSstFileList().stream() + .filter(x -> StringUtils.bytes2String(x.columnFamilyName()).equals(OmMetadataManagerImpl.KEY_TABLE)) + .collect(Collectors.toList()); } - return getRdbStore().getDb().getSstFileList().stream().filter( - x -> new String(x.columnFamilyName(), UTF_8).equals( - OmMetadataManagerImpl.FILE_TABLE)).collect(Collectors.toList()); + return getRdbStore().getDb().getSstFileList().stream() + .filter(x -> StringUtils.bytes2String(x.columnFamilyName()).equals(OmMetadataManagerImpl.FILE_TABLE)) + .collect(Collectors.toList()); } private void flushKeyTable() throws IOException { @@ -2030,7 +2042,7 @@ public void testSnapshotOpensWithDisabledAutoCompaction() throws Exception { String snapPrefix = createSnapshot(volumeName, bucketName); try (RDBStore snapshotDBStore = (RDBStore) ((OmSnapshot) cluster.getOzoneManager().getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, snapPrefix, false).get()) + .getActiveFsMetadataOrSnapshot(volumeName, bucketName, snapPrefix).get()) .getMetadataManager().getStore()) { for (String table : snapshotDBStore.getTableNames().values()) { assertTrue(snapshotDBStore.getDb().getColumnFamily(table) @@ -2106,7 +2118,7 @@ public void testSnapshotDiffWhenOmRestartAndReportIsPartiallyFetched() await(POLL_MAX_WAIT_MILLIS, POLL_INTERVAL_MILLIS, () -> 
cluster.getOzoneManager().isRunning()); - while (nextToken == null || StringUtils.isNotEmpty(nextToken)) { + while (nextToken == null || !nextToken.isEmpty()) { diffReport = fetchReportPage(volumeName, bucketName, snapshot1, snapshot2, nextToken, pageSize); diffReportEntries.addAll(diffReport.getDiffList()); @@ -2160,7 +2172,7 @@ public void testCompactionDagDisableForSnapshotMetadata() throws Exception { OmSnapshot omSnapshot = (OmSnapshot) cluster.getOzoneManager() .getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, snapshotName, false).get(); + .getActiveFsMetadataOrSnapshot(volumeName, bucketName, snapshotName).get(); RDBStore snapshotDbStore = (RDBStore) omSnapshot.getMetadataManager().getStore(); @@ -2481,4 +2493,49 @@ public void testSnapshotCompactionDag() throws Exception { fetchReportPage(volume1, bucket3, "bucket3-snap1", "bucket3-snap3", null, 0).getDiffList().size()); } + + @Test + public void testSnapshotReuseSnapName() throws Exception { + // start KeyManager for this test + startKeyManager(); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); + store.createVolume(volume); + OzoneVolume volume1 = store.getVolume(volume); + volume1.createBucket(bucket); + OzoneBucket bucket1 = volume1.getBucket(bucket); + // Create Key1 and take snapshot + String key1 = "key-1-"; + createFileKeyWithPrefix(bucket1, key1); + String snap1 = "snap" + counter.incrementAndGet(); + String snapshotKeyPrefix = createSnapshot(volume, bucket, snap1); + + int keyCount1 = keyCount(bucket1, snapshotKeyPrefix + "key-"); + assertEquals(1, keyCount1); + + store.deleteSnapshot(volume, bucket, snap1); + + GenericTestUtils.waitFor(() -> { + try { + return !ozoneManager.getMetadataManager().getSnapshotInfoTable() + .isExist(SnapshotInfo.getTableKey(volume, bucket, snap1)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 200, 10000); + + createFileKeyWithPrefix(bucket1, key1); + String snap2 = "snap" + counter.incrementAndGet(); + createSnapshot(volume, bucket, snap2); + + String key2 = "key-2-"; + createFileKeyWithPrefix(bucket1, key2); + createSnapshot(volume, bucket, snap1); + + int keyCount2 = keyCount(bucket1, snapshotKeyPrefix + "key-"); + assertEquals(3, keyCount2); + + // Stop key manager after testcase executed + stopKeyManager(); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java similarity index 91% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java index fd1a60128de1..95a24b8ca99c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.junit.jupiter.api.AfterAll; @@ -43,7 +44,7 @@ */ public class TestOmSnapshotDisabled { - private static MiniOzoneCluster cluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static OzoneClient client; private static ObjectStore store; @@ -57,17 +58,13 @@ public static void init() throws Exception { // Disable filesystem snapshot feature for this test conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, false); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(3) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); - OzoneManager leaderOzoneManager = - ((MiniOzoneHAClusterImpl) cluster).getOMLeader(); - OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); store = client.getObjectStore(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java similarity index 93% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java index babc643ffa01..91ad9eb8fe55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -54,16 +55,13 @@ public static void init() throws Exception { // Enable filesystem snapshot feature at the beginning conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test2") .setNumOfOzoneManagers(3) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); - OzoneManager leaderOzoneManager = cluster.getOMLeader(); - OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); store = client.getObjectStore(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java similarity index 98% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index 055ddeb20c9a..0849b9007810 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -41,12 +41,16 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.snapshot.TestOmSnapshot; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; @@ -623,6 +627,7 @@ private void createAndCommitKey(String keyName) throws IOException { .setBucketName(bucketName).setKeyName(keyName) .setAcls(Collections.emptyList()) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .setLocationInfoList(new ArrayList<>()).build(); OpenKeySession session = writeClient.openKey(keyArgs); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java index 66d395160201..47bdd8f3bd52 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.junit.jupiter.api.Timeout; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java index 86682b2cbc19..b8d81c31cf5f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.junit.jupiter.api.Timeout; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java index 5ed2f848aed8..06fbebb2efa2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.snapshot; import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.Timeout; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; @@ -30,7 +29,6 @@ */ @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @Timeout(300) -@Unhealthy("HDDS-10149") class TestOmSnapshotFsoWithNativeLib extends TestOmSnapshot { TestOmSnapshotFsoWithNativeLib() throws Exception { super(FILE_SYSTEM_OPTIMIZED, false, false, false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java index 1d3ddb08a684..341b5b78c603 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -30,6 +30,9 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.junit.jupiter.api.AfterAll; @@ -70,7 +73,7 @@ public static void staticInit() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index 8c0b375c3ca9..5694edd773ea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -110,16 +110,12 @@ public static void init() throws Exception { final String omServiceId = "om-service-test-1" + RandomStringUtils.randomNumeric(32); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .build(); cluster.waitForClusterToBeReady(); - ozoneManager = cluster.getOzoneManager(); - final OzoneConfiguration ozoneManagerConf = ozoneManager.getConfiguration(); - cluster.setConf(ozoneManagerConf); - final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; final OzoneConfiguration clientConf = new OzoneConfiguration(cluster.getConf()); @@ -128,12 +124,13 @@ public static void init() throws Exception { client = cluster.newClient(); objectStore = client.getObjectStore(); + ozoneManager = cluster.getOzoneManager(); final KeyManagerImpl keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); // stop the deletion services so that keys can still be read keyManager.stop(); - OMStorage.getOmDbDir(ozoneManagerConf); + OMStorage.getOmDbDir(cluster.getConf()); } @AfterAll @@ -630,7 +627,7 @@ private void createBucket(BucketLayout bucketLayout, private void createVolume() throws IOException { final String volumePrefix = "volume-"; volumeName = volumePrefix + RandomStringUtils.randomNumeric(32); - final VolumeArgs volumeArgs = new VolumeArgs.Builder() + final VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setAdmin(ADMIN) .setOwner(ADMIN) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index 643191b36d41..d28f25a28fac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -66,7 +66,7 @@ public void init() throws Exception { omServiceId = "om-service-test1"; conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java index e0d01c148d6b..4cd2f98c2b8b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java @@ -72,7 +72,7 @@ @Timeout(value = 300) public class TestOzoneSnapshotRestore { private static final String OM_SERVICE_ID = "om-service-test-1"; - private MiniOzoneCluster cluster; + private MiniOzoneHAClusterImpl cluster; private ObjectStore store; private OzoneManager leaderOzoneManager; private OzoneConfiguration clientConf; @@ -105,18 +105,17 @@ public void init() throws Exception { String serviceID = OM_SERVICE_ID + RandomStringUtils.randomNumeric(5); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(serviceID) .setNumOfOzoneManagers(3) .build(); cluster.waitForClusterToBeReady(); - leaderOzoneManager = ((MiniOzoneHAClusterImpl) cluster).getOMLeader(); + leaderOzoneManager = cluster.getOMLeader(); OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + serviceID; - clientConf = new OzoneConfiguration(cluster.getConf()); + clientConf = new OzoneConfiguration(leaderConfig); clientConf.set(FS_DEFAULT_NAME_KEY, hostPrefix); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java index a7bc55446413..2f7e1bd5a9d6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -34,14 +34,16 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.ozone.compaction.log.CompactionLogEntry; @@ -76,7 +78,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithStoppedNodes.createKey; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -153,7 +154,7 @@ public void init(TestInfo testInfo) throws Exception { OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, SNAPSHOT_THRESHOLD); int numOfOMs = 3; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) @@ -259,12 +260,11 @@ public void testSnapshotAndKeyDeletionBackgroundServices() // get snapshot c OmSnapshot snapC; - try (ReferenceCounted rcC = newLeaderOM + try (ReferenceCounted rcC = newLeaderOM .getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfoC.getName()), true)) { + .getSnapshot(volumeName, bucketName, snapshotInfoC.getName())) { assertNotNull(rcC); - snapC = (OmSnapshot) rcC.get(); + snapC = rcC.get(); } // assert that key a is in snapshot c's deleted table @@ -284,12 +284,11 @@ public void testSnapshotAndKeyDeletionBackgroundServices() // get snapshot d OmSnapshot snapD; - try (ReferenceCounted rcD = newLeaderOM + try (ReferenceCounted rcD = newLeaderOM .getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfoD.getName()), true)) { + .getSnapshot(volumeName, bucketName, snapshotInfoD.getName())) { assertNotNull(rcD); - snapD = (OmSnapshot) rcD.get(); + snapD = rcD.get(); } // wait until key a appears in deleted table of snapshot d diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java similarity index 84% rename from 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java index 6e3e4fd7f404..472abf42095d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java @@ -17,7 +17,7 @@ * */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -32,18 +32,26 @@ import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,19 +62,23 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * Test Snapshot Deleting Service. 
*/ + +@Timeout(300) +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(OrderAnnotation.class) public class TestSnapshotDeletingService { private static final Logger LOG = @@ -82,8 +94,11 @@ public class TestSnapshotDeletingService { private static final String VOLUME_NAME = "vol1"; private static final String BUCKET_NAME_ONE = "bucket1"; private static final String BUCKET_NAME_TWO = "bucket2"; + private static final String BUCKET_NAME_FSO = "bucketfso"; + + private boolean runIndividualTest = true; - @BeforeEach + @BeforeAll public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, @@ -112,7 +127,7 @@ public void setup() throws Exception { client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT); } - @AfterEach + @AfterAll public void teardown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -121,21 +136,23 @@ public void teardown() { } @Test + @Order(2) public void testSnapshotSplitAndMove() throws Exception { - SnapshotDeletingService snapshotDeletingService = - om.getKeyManager().getSnapshotDeletingService(); - Table snapshotInfoTable = - om.getMetadataManager().getSnapshotInfoTable(); - createSnapshotDataForBucket1(); + if (runIndividualTest) { + SnapshotDeletingService snapshotDeletingService = + om.getKeyManager().getSnapshotDeletingService(); + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); - assertTableRowCount(snapshotInfoTable, 2); - GenericTestUtils.waitFor(() -> snapshotDeletingService - .getSuccessfulRunCount() >= 1, 1000, 10000); + createSnapshotDataForBucket1(); - OmSnapshot bucket1snap3 = (OmSnapshot) om.getOmSnapshotManager() - .checkForSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - getSnapshotPrefix("bucket1snap3"), true).get(); + assertTableRowCount(snapshotInfoTable, 2); + GenericTestUtils.waitFor(() -> snapshotDeletingService + .getSuccessfulRunCount() >= 1, 1000, 10000); + } + OmSnapshot bucket1snap3 = om.getOmSnapshotManager() + .getSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, "bucket1snap3").get(); // Check bucket1key1 added to next non deleted snapshot db. 
List> omKeyInfos = @@ -146,12 +163,14 @@ public void testSnapshotSplitAndMove() throws Exception { } @Test + @Order(1) public void testMultipleSnapshotKeyReclaim() throws Exception { Table deletedTable = om.getMetadataManager().getDeletedTable(); Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); + runIndividualTest = false; createSnapshotDataForBucket1(); @@ -190,12 +209,17 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { // verify the cache of purged snapshot // /vol1/bucket2/bucket2snap1 has been cleaned up from cache map - SnapshotCache snapshotCache = om.getOmSnapshotManager().getSnapshotCache(); - assertEquals(2, snapshotCache.size()); + assertEquals(2, om.getOmSnapshotManager().getSnapshotCacheSize()); + + // cleaning up the data + client.getProxy().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, "bucket1snap1"); + client.getProxy().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, "bucket1snap3"); + client.getProxy().deleteBucket(VOLUME_NAME, BUCKET_NAME_TWO); } @SuppressWarnings("checkstyle:MethodLength") @Test + @Order(3) public void testSnapshotWithFSO() throws Exception { Table dirTable = om.getMetadataManager().getDirectoryTable(); @@ -215,7 +239,7 @@ public void testSnapshotWithFSO() throws Exception { .build(); OzoneBucket bucket2 = TestDataUtil.createBucket( - client, VOLUME_NAME, bucketArgs, BUCKET_NAME_TWO); + client, VOLUME_NAME, bucketArgs, BUCKET_NAME_FSO); // Create 10 keys for (int i = 1; i <= 10; i++) { @@ -233,12 +257,12 @@ public void testSnapshotWithFSO() throws Exception { for (int i = 1; i <= 3; i++) { String parent = "parent" + i; client.getProxy().createDirectory(VOLUME_NAME, - BUCKET_NAME_TWO, parent); + BUCKET_NAME_FSO, parent); for (int j = 1; j <= 3; j++) { String childFile = "/childFile" + j; String childDir = "/childDir" + j; client.getProxy().createDirectory(VOLUME_NAME, - BUCKET_NAME_TWO, parent + childDir); + BUCKET_NAME_FSO, parent + childDir); TestDataUtil.createKey(bucket2, parent + childFile, ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); } @@ -250,7 +274,7 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(deletedDirTable, 0); // Create Snapshot1 - client.getObjectStore().createSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, + client.getObjectStore().createSnapshot(VOLUME_NAME, BUCKET_NAME_FSO, "snap1"); assertTableRowCount(snapshotInfoTable, 1); @@ -263,37 +287,37 @@ public void testSnapshotWithFSO() throws Exception { // Delete 5 Keys for (int i = 1; i <= 5; i++) { - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_TWO, + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_FSO, "key" + i, false); } // Rename Keys 3 keys for (int i = 6; i <= 8; i++) { - client.getProxy().renameKey(VOLUME_NAME, BUCKET_NAME_TWO, "key" + i, + client.getProxy().renameKey(VOLUME_NAME, BUCKET_NAME_FSO, "key" + i, "renamedKey" + i); } // Rename 1 Dir for (int i = 1; i <= 1; i++) { - client.getProxy().renameKey(VOLUME_NAME, BUCKET_NAME_TWO, "/parent" + i, + client.getProxy().renameKey(VOLUME_NAME, BUCKET_NAME_FSO, "/parent" + i, "/renamedParent" + i); } // Delete 2 Dirs for (int i = 2; i <= 3; i++) { - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_TWO, "/parent" + i, + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_FSO, "/parent" + i, true); } assertTableRowCount(renamedTable, 4); // Delete Renamed Keys for (int i = 6; i <= 8; i++) { - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_TWO, + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_FSO, "renamedKey" + i, false); } // Delete 
Renamed Dir for (int i = 1; i <= 1; i++) { - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_TWO, + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_FSO, "/renamedParent" + i, true); } @@ -303,7 +327,7 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(renamedTable, 4); // Create Snapshot2 - client.getObjectStore().createSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, + client.getObjectStore().createSnapshot(VOLUME_NAME, BUCKET_NAME_FSO, "snap2"); assertTableRowCount(snapshotInfoTable, 2); @@ -315,7 +339,7 @@ public void testSnapshotWithFSO() throws Exception { // Delete 3 overwritten keys for (int i = 11; i <= 13; i++) { - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_TWO, + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_FSO, "key" + i, false); } @@ -327,14 +351,14 @@ public void testSnapshotWithFSO() throws Exception { // Delete 2 more keys for (int i = 9; i <= 10; i++) { - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_TWO, + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_FSO, "key" + i, false); } assertTableRowCount(deletedTable, 7); // Create Snapshot3 - client.getObjectStore().createSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, + client.getObjectStore().createSnapshot(VOLUME_NAME, BUCKET_NAME_FSO, "snap3"); assertTableRowCount(snapshotInfoTable, 3); @@ -343,25 +367,24 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(deletedTable, 0); assertTableRowCount(keyTable, 11); SnapshotInfo deletedSnap = om.getMetadataManager() - .getSnapshotInfoTable().get("/vol1/bucket2/snap2"); + .getSnapshotInfoTable().get("/vol1/bucketfso/snap2"); - client.getObjectStore().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, + client.getObjectStore().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_FSO, "snap2"); assertTableRowCount(snapshotInfoTable, 2); // Delete 2 overwritten keys for (int i = 14; i <= 15; i++) { - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_TWO, + client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_FSO, "key" + i, false); } assertTableRowCount(deletedTable, 2); // Once all the tables are moved, the snapshot is deleted assertTableRowCount(om.getMetadataManager().getSnapshotInfoTable(), 2); - verifySnapshotChain(deletedSnap, "/vol1/bucket2/snap3"); - OmSnapshot snap3 = (OmSnapshot) om.getOmSnapshotManager() - .checkForSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, - getSnapshotPrefix("snap3"), true).get(); + verifySnapshotChain(deletedSnap, "/vol1/bucketfso/snap3"); + OmSnapshot snap3 = om.getOmSnapshotManager() + .getSnapshot(VOLUME_NAME, BUCKET_NAME_FSO, "snap3").get(); Table snapDeletedDirTable = snap3.getMetadataManager().getDeletedDirTable(); @@ -380,7 +403,7 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(deletedDirTable, 0); assertTableRowCount(deletedTable, 2); // Delete Snapshot3 and check entries moved to active DB - client.getObjectStore().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, + client.getObjectStore().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_FSO, "snap3"); // Check entries moved to active DB @@ -388,10 +411,10 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(renamedTable, 4); assertTableRowCount(deletedDirTable, 3); - ReferenceCounted rcSnap1 = - om.getOmSnapshotManager().checkForSnapshot( - VOLUME_NAME, BUCKET_NAME_TWO, getSnapshotPrefix("snap1"), true); - OmSnapshot snap1 = (OmSnapshot) rcSnap1.get(); + ReferenceCounted rcSnap1 = + om.getOmSnapshotManager().getSnapshot( + VOLUME_NAME, BUCKET_NAME_FSO, "snap1"); + OmSnapshot snap1 = rcSnap1.get(); Table 
snap1KeyTable = snap1.getMetadataManager().getFileTable(); try (TableIterator table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 6b39b76c5466..fac6764767f6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -46,17 +47,17 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * Test Snapshot Directory Service. 
@@ -114,15 +115,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @SuppressWarnings("checkstyle:LineLength") @@ -258,15 +257,12 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java index 8f11941fcbf6..dff4cd046c9b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java @@ -66,7 +66,7 @@ void setup() throws Exception { String omServiceId = "omServiceId1"; OzoneConfiguration conf = new OzoneConfiguration(); String scmServiceId = "scmServiceId"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfOzoneManagers(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java index 7691704d924c..be94cf42d080 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java @@ -243,10 +243,10 @@ void testNodesInDecommissionOrMaintenance( // First node goes offline. if (isMaintenance) { scmClient.startMaintenanceNodes(Collections.singletonList( - TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), 0); + TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), 0, true); } else { scmClient.decommissionNodes(Collections.singletonList( - TestNodeUtil.getDNHostAndPort(nodeToGoOffline1))); + TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), false); } TestNodeUtil.waitForDnToReachOpState(scmNodeManager, @@ -270,10 +270,10 @@ void testNodesInDecommissionOrMaintenance( // Second node goes offline. 
if (isMaintenance) { scmClient.startMaintenanceNodes(Collections.singletonList( - TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), 0); + TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), 0, true); } else { scmClient.decommissionNodes(Collections.singletonList( - TestNodeUtil.getDNHostAndPort(nodeToGoOffline2))); + TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), false); } TestNodeUtil.waitForDnToReachOpState(scmNodeManager, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java new file mode 100644 index 000000000000..8c334780d94f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.recon.api.ContainerEndpoint; +import org.apache.hadoop.ozone.recon.api.types.KeyMetadata; +import org.apache.hadoop.ozone.recon.api.types.KeysResponse; +import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; +import java.nio.charset.StandardCharsets; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import javax.ws.rs.core.Response; +import java.io.IOException; +import java.util.Collection; + +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Integration test for the Recon container endpoint.
+ */ +public class TestReconContainerEndpoint { + + private OzoneConfiguration conf; + private MiniOzoneCluster cluster; + private OzoneClient client; + private ObjectStore store; + + @BeforeEach + public void init() throws Exception { + conf = new OzoneConfiguration(); + conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, + OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .includeRecon(true) + .build(); + cluster.waitForClusterToBeReady(); + client = cluster.newClient(); + store = client.getObjectStore(); + } + + @AfterEach + public void shutdown() throws IOException { + if (client != null) { + client.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testContainerEndpointForFSOLayout() throws Exception { + // Setup: Create multiple volumes, buckets, and key hierarchies + String volName = "testvol"; + String bucketName = "fsobucket"; + // Scenario 1: Deeply nested directories + String nestedDirKey = "dir1/dir2/dir3/file1"; + // Scenario 2: Single file in a bucket + String singleFileKey = "file1"; + + // Create volume and bucket + store.createVolume(volName); + OzoneVolume volume = store.getVolume(volName); + volume.createBucket(bucketName, BucketArgs.newBuilder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build()); + + // Write keys to the bucket + writeTestData(volName, bucketName, nestedDirKey, "data1"); + writeTestData(volName, bucketName, singleFileKey, "data2"); + + // Synchronize data from OM to Recon + OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl) + cluster.getReconServer().getOzoneManagerServiceProvider(); + impl.syncDataFromOM(); + + //Search for the bucket from the bucket table and verify its FSO + OmBucketInfo bucketInfo = cluster.getOzoneManager().getBucketInfo(volName, bucketName); + assertNotNull(bucketInfo); + assertEquals(BucketLayout.FILE_SYSTEM_OPTIMIZED, + bucketInfo.getBucketLayout()); + + // Assuming a known container ID that these keys have been written into + long testContainerID = 1L; + + // Query the ContainerEndpoint for the keys in the specified container + Response response = getContainerEndpointResponse(testContainerID); + + assertNotNull(response, "Response should not be null."); + assertEquals(Response.Status.OK.getStatusCode(), response.getStatus(), + "Expected HTTP 200 OK response."); + + KeysResponse data = (KeysResponse) response.getEntity(); + Collection keyMetadataList = data.getKeys(); + + assertEquals(1, data.getTotalCount()); + assertEquals(1, keyMetadataList.size()); + + // Assert the file name and the complete path. + KeyMetadata keyMetadata = keyMetadataList.iterator().next(); + assertEquals("file1", keyMetadata.getKey()); + assertEquals("testvol/fsobucket/dir1/dir2/dir3/file1", keyMetadata.getCompletePath()); + + testContainerID = 2L; + response = getContainerEndpointResponse(testContainerID); + data = (KeysResponse) response.getEntity(); + keyMetadataList = data.getKeys(); + assertEquals(1, data.getTotalCount()); + assertEquals(1, keyMetadataList.size()); + + // Assert the file name and the complete path. 
+ keyMetadata = keyMetadataList.iterator().next(); + assertEquals("file1", keyMetadata.getKey()); + assertEquals("testvol/fsobucket/file1", keyMetadata.getCompletePath()); + } + + @Test + public void testContainerEndpointForOBSBucket() throws Exception { + String volumeName = "testvol2"; + String obsBucketName = "obsbucket"; + String obsSingleFileKey = "file1"; + + // Setup volume and OBS bucket + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(obsBucketName, + BucketArgs.newBuilder().setBucketLayout(BucketLayout.OBJECT_STORE) + .build()); + + // Write a single file to the OBS bucket + writeTestData(volumeName, obsBucketName, obsSingleFileKey, "Hello OBS!"); + + OzoneManagerServiceProviderImpl impl = + (OzoneManagerServiceProviderImpl) cluster.getReconServer() + .getOzoneManagerServiceProvider(); + impl.syncDataFromOM(); + + // Search for the bucket from the bucket table and verify its OBS + OmBucketInfo bucketInfo = cluster.getOzoneManager().getBucketInfo(volumeName, obsBucketName); + assertNotNull(bucketInfo); + assertEquals(BucketLayout.OBJECT_STORE, bucketInfo.getBucketLayout()); + + // Initialize the ContainerEndpoint + long containerId = 1L; + Response response = getContainerEndpointResponse(containerId); + + assertNotNull(response, "Response should not be null."); + assertEquals(Response.Status.OK.getStatusCode(), response.getStatus(), + "Expected HTTP 200 OK response."); + KeysResponse data = (KeysResponse) response.getEntity(); + Collection keyMetadataList = data.getKeys(); + + assertEquals(1, data.getTotalCount()); + assertEquals(1, keyMetadataList.size()); + + KeyMetadata keyMetadata = keyMetadataList.iterator().next(); + assertEquals("file1", keyMetadata.getKey()); + assertEquals("testvol2/obsbucket/file1", keyMetadata.getCompletePath()); + } + + private Response getContainerEndpointResponse(long containerId) { + OzoneStorageContainerManager reconSCM = + cluster.getReconServer().getReconStorageContainerManager(); + ReconContainerManager reconContainerManager = + (ReconContainerManager) reconSCM.getContainerManager(); + ContainerHealthSchemaManager containerHealthSchemaManager = + reconContainerManager.getContainerSchemaManager(); + ReconOMMetadataManager omMetadataManagerInstance = + (ReconOMMetadataManager) + cluster.getReconServer().getOzoneManagerServiceProvider() + .getOMMetadataManagerInstance(); + ContainerEndpoint containerEndpoint = + new ContainerEndpoint(reconSCM, containerHealthSchemaManager, + cluster.getReconServer().getReconNamespaceSummaryManager(), + cluster.getReconServer().getReconContainerMetadataManager(), + omMetadataManagerInstance); + return containerEndpoint.getKeysForContainer(containerId, 10, ""); + } + + private void writeTestData(String volumeName, String bucketName, + String keyPath, String data) throws Exception { + try (OzoneOutputStream out = client.getObjectStore().getVolume(volumeName) + .getBucket(bucketName) + .createKey(keyPath, data.length())) { + out.write(data.getBytes(StandardCharsets.UTF_8)); + } + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 4c059be1b542..ca8fcae6643b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -54,6 +54,7 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; @@ -61,6 +62,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.fail; @@ -127,15 +129,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } /** @@ -461,21 +461,19 @@ private void assertTableRowCount(Table table, int expectedCount, private boolean assertTableRowCount(int expectedCount, Table table, boolean isRecon) { - long count = 0L; - try { + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { if (isRecon) { - count = cluster.getReconServer().getOzoneManagerServiceProvider() - .getOMMetadataManagerInstance().countRowsInTable(table); + count.set(cluster.getReconServer().getOzoneManagerServiceProvider() + .getOMMetadataManagerInstance().countRowsInTable(table)); } else { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + count.set(cluster.getOzoneManager().getMetadataManager() + .countRowsInTable(table)); } LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("Test failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } private void syncDataFromOM() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java index 9c0f09d760ef..66be107ebf64 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java @@ -19,9 +19,12 @@ import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; @@ -37,6 +40,7 @@ import static 
org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Test Recon SCM Snapshot Download implementation. @@ -49,6 +53,8 @@ public class TestReconScmSnapshot { @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); + conf.set("ozone.scm.stale.node.interval", "6s"); + conf.set("ozone.scm.dead.node.interval", "8s"); conf.setBoolean( ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true); conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0); @@ -119,6 +125,50 @@ public static void testSnapshot(MiniOzoneCluster cluster) throws Exception { assertEquals(keyCountAfter, keyCountBefore); } + @Test + public void testExplicitRemovalOfNode() throws Exception { + ReconNodeManager nodeManager = (ReconNodeManager) ozoneCluster.getReconServer() + .getReconStorageContainerManager().getScmNodeManager(); + long nodeDBCountBefore = nodeManager.getNodeDBKeyCount(); + List allNodes = nodeManager.getAllNodes(); + assertEquals(nodeDBCountBefore, allNodes.size()); + + DatanodeDetails datanodeDetails = allNodes.get(3); + ozoneCluster.shutdownHddsDatanode(datanodeDetails); + + GenericTestUtils.waitFor(() -> { + try { + return nodeManager.getNodeStatus(datanodeDetails).isDead(); + } catch (NodeNotFoundException e) { + fail("getNodeStatus() Failed for " + datanodeDetails.getUuid(), e); + throw new RuntimeException(e); + } + }, 2000, 10000); + + // Even after one node is DEAD, the node manager still keeps tracking the DEAD node. + long nodeDBCountAfter = nodeManager.getNodeDBKeyCount(); + assertEquals(nodeDBCountAfter, 4); + + final NodeStatus nStatus = nodeManager.getNodeStatus(datanodeDetails); + + final HddsProtos.NodeOperationalState backupOpState = + datanodeDetails.getPersistedOpState(); + final long backupOpStateExpiry = + datanodeDetails.getPersistedOpStateExpiryEpochSec(); + assertEquals(backupOpState, nStatus.getOperationalState()); + assertEquals(backupOpStateExpiry, nStatus.getOpStateExpiryEpochSeconds()); + + // Now remove the DEAD node from both the node DB and the node manager's in-memory state.
+ nodeManager.removeNode(datanodeDetails); + + // Now check how many datanodes the node DB holds and how many the node manager is tracking in memory + nodeDBCountAfter = nodeManager.getNodeDBKeyCount(); + assertEquals(nodeDBCountAfter, 3); + + allNodes = nodeManager.getAllNodes(); + assertEquals(nodeDBCountAfter, allNodes.size()); + } + @AfterEach public void shutdown() throws Exception { if (ozoneCluster != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index 44385698c5c3..cba7311b3b4f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -69,11 +69,11 @@ public void init() throws Exception { conf.set(HDDS_PIPELINE_REPORT_INTERVAL, "5s"); ReconTaskConfig taskConfig = conf.getObject(ReconTaskConfig.class); - taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(15)); + taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(10)); conf.setFromObject(taskConfig); conf.set("ozone.scm.stale.node.interval", "6s"); - conf.set("ozone.scm.dead.node.interval", "10s"); + conf.set("ozone.scm.dead.node.interval", "8s"); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) .includeRecon(true).build(); cluster.waitForClusterToBeReady(); @@ -246,6 +246,7 @@ public void testEmptyMissingContainerDownNode() throws Exception { return (allEmptyMissingContainers.size() == 1); }); + // Now add a container to key mapping count as 3. This data is used to // identify if container is empty in terms of keys mapped to container. try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { @@ -274,6 +275,26 @@ public void testEmptyMissingContainerDownNode() throws Exception { return (allEmptyMissingContainers.isEmpty()); }); + // Now remove the keys from the container. This data is used to + // determine whether the container is empty in terms of keys mapped to it. + try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + reconContainerMetadataManager + .batchStoreContainerKeyCounts(rdbBatchOperation, containerID, 0L); + reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); + } + + // Check that the existing container state in the UNHEALTHY_CONTAINER table + // is updated to EMPTY_MISSING + LambdaTestUtils.await(25000, 1000, () -> { + List allEmptyMissingContainers = + reconContainerManager.getContainerSchemaManager() + .getUnhealthyContainers( + ContainerSchemaDefinition.UnHealthyContainerStates. + EMPTY_MISSING, + 0, 1000); + return (allEmptyMissingContainers.size() == 1); + }); + // Now restart the cluster and verify the container is no longer missing.
cluster.restartHddsDatanode(pipeline.getFirstNode(), true); LambdaTestUtils.await(25000, 1000, () -> { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index 9589b1c40056..f51d12a7c53f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -38,8 +38,10 @@ import java.util.Collections; import java.util.List; import java.util.Optional; +import java.util.Map; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hdds.JsonTestUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -65,8 +67,6 @@ import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; -import com.google.gson.Gson; -import com.google.gson.internal.LinkedTreeMap; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -202,7 +202,7 @@ public void testOmDBSyncing() throws Exception { // verify sequence number after full snapshot assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); //add 4 keys to check for delta updates addKeys(1, 5); @@ -220,7 +220,7 @@ public void testOmDBSyncing() throws Exception { //verify sequence number after Delta Updates assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); long beforeRestartSnapShotTimeStamp = getReconTaskAttributeFromJson( taskStatusResponse, @@ -260,7 +260,7 @@ public void testOmDBSyncing() throws Exception { //verify sequence number after Delta Updates assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); } // This test simulates the mis-match in sequence number between Recon OM @@ -314,7 +314,7 @@ public void testOmDBSyncWithSeqNumberMismatch() throws Exception { // verify sequence number after incremental delta snapshot assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); String volume = "vol15"; String bucket = "bucket15"; @@ -356,7 +356,7 @@ public void testOmDBSyncWithSeqNumberMismatch() throws Exception { reconLatestSeqNumber = ((RDBStore) reconMetadataManagerInstance.getStore()).getDb() .getLatestSequenceNumber(); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); assertEquals(omLatestSeqNumber, reconLatestSeqNumber); reconLatestSeqNumber = ((RDBStore) reconMetadataManagerInstance.getStore()).getDb() @@ -381,16 +381,23 @@ private static OmKeyLocationInfoGroup getOmKeyLocationInfoGroup() { private long getReconTaskAttributeFromJson(String taskStatusResponse, String taskName, - String entityAttribute) { - ArrayList taskStatusList = new Gson() - .fromJson(taskStatusResponse, ArrayList.class); - Optional taskEntity = - taskStatusList - .stream() - .filter(task -> task.get("taskName").equals(taskName)) 
- .findFirst(); - assertTrue(taskEntity.isPresent()); - return (long) (double) taskEntity.get().get(entityAttribute); + String entityAttribute) + throws IOException { + List> taskStatusList = + JsonTestUtils.readTreeAsListOfMaps(taskStatusResponse); + + // Stream through the list to find the task entity matching the taskName + Optional> taskEntity = taskStatusList.stream() + .filter(task -> taskName.equals(task.get("taskName"))) + .findFirst(); + + if (taskEntity.isPresent()) { + Number number = (Number) taskEntity.get().get(entityAttribute); + return number.longValue(); + } else { + throw new IOException( + "Task entity for task name " + taskName + " not found"); + } } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 8baad9cb97b4..0d7cb5fbf075 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -73,12 +73,12 @@ public void setup() throws Exception { dbConf.setSyncOption(true); conf.setFromObject(dbConf); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(OM_SERVICE_ID) - .setNumDatanodes(1) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(3) - .includeRecon(true) - .build(); + .setNumDatanodes(1) + .includeRecon(true); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(OM_SERVICE_ID, conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 9d0552a169fe..6f6c5439d8c1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -96,7 +96,7 @@ public void init() throws Exception { conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfActiveSCMs(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java index a79e2de245da..3a9f7e322b9e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java @@ -80,7 +80,6 @@ public class TestOzoneContainerUpgradeShell { private static final Logger LOG = LoggerFactory.getLogger(TestOzoneContainerUpgradeShell.class); - private static String omServiceId; private static MiniOzoneCluster cluster = null; private static OzoneClient client; private static OzoneConfiguration conf = null; @@ -88,12 +87,7 
@@ public class TestOzoneContainerUpgradeShell { private static final String BUCKET_NAME = UUID.randomUUID().toString(); protected static void startCluster() throws Exception { - // Init HA cluster - omServiceId = "om-service-test-upgrade-container1"; - final int numDNs = 3; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) - .setNumDatanodes(numDNs) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index b50cea759ea4..15d9746fcb6a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -88,7 +88,6 @@ protected static void startCluster() throws Exception { omServiceId = "om-service-test1"; final int numDNs = 3; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) .setNumDatanodes(numDNs) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 6eb892659820..5a46d571c6b7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -23,11 +23,13 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; -import java.util.ArrayList; +import java.net.URI; +import java.util.Map; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.UUID; +import java.util.concurrent.TimeoutException; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.crypto.key.KeyProvider; @@ -50,6 +52,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; @@ -63,28 +66,32 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.service.OpenKeyCleanupService; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.ozone.om.TrashPolicyOzone; +import org.apache.hadoop.hdds.JsonTestUtils; import com.google.common.base.Strings; -import com.google.gson.Gson; -import com.google.gson.internal.LinkedTreeMap; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; 
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; - import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.OBJECT_STORE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -104,6 +111,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -130,7 +139,7 @@ public class TestOzoneShellHA { private static File baseDir; private static File testFile; private static String testFilePathString; - private static MiniOzoneCluster cluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static File testDir; private static MiniKMS miniKMS; private static OzoneClient client; @@ -186,11 +195,13 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { final int numDNs = 5; conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI(miniKMS)); - cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) + conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 10); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(numDNs) - .build(); + .setNumDatanodes(numDNs); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); } @@ -287,8 +298,7 @@ private void executeWithError(OzoneShell shell, String[] args, * @return the leader OM's Node ID in the MiniOzoneHACluster. */ private String getLeaderOMNodeId() { - MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; - OzoneManager omLeader = haCluster.getOMLeader(); + OzoneManager omLeader = cluster.getOMLeader(); assertNotNull(omLeader, "There should be a leader OM at this point."); return omLeader.getOMNodeId(); } @@ -410,12 +420,11 @@ private int getNumOfBuckets(String bucketPrefix) } /** - * Parse output into ArrayList with Gson. + * Parse output into ArrayList with Jackson. * @return ArrayList */ - private ArrayList> parseOutputIntoArrayList() - throws UnsupportedEncodingException { - return new Gson().fromJson(out.toString(DEFAULT_ENCODING), ArrayList.class); + private List> parseOutputIntoArrayList() throws IOException { + return JsonTestUtils.readTreeAsListOfMaps(out.toString(DEFAULT_ENCODING)); } @Test @@ -489,7 +498,7 @@ public void testOzoneShCmdURIs() { * Test ozone shell list command. 
*/ @Test - public void testOzoneShCmdList() throws UnsupportedEncodingException { + public void testOzoneShCmdList() throws IOException { // Part of listing keys test. generateKeys("/volume4", "/bucket", ""); final String destinationBucket = "o3://" + omServiceId + "/volume4/bucket"; @@ -575,7 +584,7 @@ public void testOzoneAdminCmdList() throws UnsupportedEncodingException { @Test public void testAdminCmdListOpenFiles() - throws IOException, InterruptedException { + throws IOException, InterruptedException, TimeoutException { OzoneConfiguration conf = cluster.getConf(); final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; @@ -692,6 +701,249 @@ public void testAdminCmdListOpenFiles() } + @Test + public void testAdminCmdListOpenFilesWithDeletedKeys() + throws Exception { + + OzoneConfiguration conf = cluster.getConf(); + final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; + + OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); + clientConf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); + FileSystem fs = FileSystem.get(clientConf); + + assertNotEquals(fs.getConf().get(OZONE_FS_HSYNC_ENABLED), + "false", OZONE_FS_HSYNC_ENABLED + " is set to false " + + "by external force. Must be true to allow hsync to function"); + + final String volumeName = "volume-list-del"; + final String bucketName = "buck1"; + + String dir1 = hostPrefix + + OM_KEY_PREFIX + volumeName + + OM_KEY_PREFIX + bucketName + + OM_KEY_PREFIX + "dir1"; + // Create volume, bucket, dir + assertTrue(fs.mkdirs(new Path(dir1))); + String keyPrefix = OM_KEY_PREFIX + "key"; + + final int numKeys = 5; + String[] keys = new String[numKeys]; + + for (int i = 0; i < numKeys; i++) { + keys[i] = dir1 + keyPrefix + i; + } + + String pathToBucket = "/" + volumeName + "/" + bucketName; + FSDataOutputStream[] streams = new FSDataOutputStream[numKeys]; + + try { + // Create multiple keys and hold them open + for (int i = 0; i < numKeys; i++) { + streams[i] = fs.create(new Path(keys[i])); + streams[i].write(1); + } + + // Wait for DB flush + cluster.getOzoneManager().awaitDoubleBufferFlush(); + + // hsync last key + streams[numKeys - 1].hsync(); + // Wait for flush + cluster.getOzoneManager().awaitDoubleBufferFlush(); + final String[] args = new String[] {"om", "lof", "--service-id", + omServiceId, "--show-deleted", "-p", pathToBucket}; + + execute(ozoneAdminShell, args); + String cmdRes = getStdOut(); + + // Verify that key is hsync'ed + assertTrue(cmdRes.contains("\tYes\t\tNo"), "key should be hsync'ed and not deleted"); + + // Verify json output + String[] args1 = new String[] {"om", "lof", "--service-id", omServiceId, "--show-deleted", + "--json", "-p", pathToBucket}; + execute(ozoneAdminShell, args1); + cmdRes = getStdOut(); + + assertTrue(!cmdRes.contains(OzoneConsts.DELETED_HSYNC_KEY), + "key should not have deletedHsyncKey metadata"); + + // Suspend open key cleanup service so that key remains in openKeyTable for verification + OpenKeyCleanupService openKeyCleanupService = + (OpenKeyCleanupService) cluster.getOzoneManager().getKeyManager().getOpenKeyCleanupService(); + openKeyCleanupService.suspend(); + OzoneFsShell shell = new OzoneFsShell(clientConf); + // Delete directory dir1 + ToolRunner.run(shell, new String[]{"-rm", "-R", "-skipTrash", dir1}); + + GenericTestUtils.waitFor(() -> { + try { + execute(ozoneAdminShell, args); + String cmdRes1 = getStdOut(); + // When directory purge request is triggered it should add DELETED_HSYNC_KEY metadata in hsync openKey + // And list open key should 
show as deleted + return cmdRes1.contains("\tYes\t\tYes"); + } catch (Throwable t) { + LOG.warn("Failed to list open key", t); + return false; + } + }, 1000, 10000); + + // Now check json output + execute(ozoneAdminShell, args1); + cmdRes = getStdOut(); + assertTrue(cmdRes.contains(OzoneConsts.DELETED_HSYNC_KEY), + "key should have deletedHsyncKey metadata"); + + // Verify result should not have deleted hsync keys when --show-deleted is not in the command argument + String[] args2 = new String[] {"om", "lof", "--service-id", omServiceId, "-p", pathToBucket}; + execute(ozoneAdminShell, args2); + cmdRes = getStdOut(); + // Verify that deletedHsyncKey is not in the result + assertTrue(!cmdRes.contains("\tYes\t\tYes"), "key should be hsync'ed and not deleted"); + + // Verify with json result + args2 = new String[] {"om", "lof", "--service-id", omServiceId, "--json", "-p", pathToBucket}; + execute(ozoneAdminShell, args2); + cmdRes = getStdOut(); + // Verify that deletedHsyncKey is not in the result + assertTrue(!cmdRes.contains(OzoneConsts.DELETED_HSYNC_KEY), + "key should not have deletedHsyncKey metadata"); + + } finally { + // Cleanup + IOUtils.closeQuietly(streams); + } + } + + @Test + public void testAdminCmdListOpenFilesWithOverwrittenKeys() + throws Exception { + + OzoneConfiguration conf = cluster.getConf(); + final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; + + OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); + clientConf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); + FileSystem fs = FileSystem.get(clientConf); + + assertNotEquals(fs.getConf().get(OZONE_FS_HSYNC_ENABLED), + "false", OZONE_FS_HSYNC_ENABLED + " is set to false " + + "by external force. Must be true to allow hsync to function"); + + final String volumeName = "volume-list-del"; + final String bucketName = "buck1"; + + String dir1 = hostPrefix + + OM_KEY_PREFIX + volumeName + + OM_KEY_PREFIX + bucketName + + OM_KEY_PREFIX + "dir1"; + // Create volume, bucket, dir + assertTrue(fs.mkdirs(new Path(dir1))); + String keyPrefix = OM_KEY_PREFIX + "key"; + + final int numKeys = 5; + String[] keys = new String[numKeys]; + + for (int i = 0; i < numKeys; i++) { + keys[i] = dir1 + keyPrefix + i; + } + + String pathToBucket = "/" + volumeName + "/" + bucketName; + FSDataOutputStream[] streams = new FSDataOutputStream[numKeys]; + + try { + // Create multiple keys and hold them open + for (int i = 0; i < numKeys; i++) { + streams[i] = fs.create(new Path(keys[i])); + streams[i].write(1); + } + + // Wait for DB flush + cluster.getOzoneManager().awaitDoubleBufferFlush(); + + // hsync last key + streams[numKeys - 1].hsync(); + // Wait for flush + cluster.getOzoneManager().awaitDoubleBufferFlush(); + final String[] args = new String[] {"om", "lof", "--service-id", + omServiceId, "--show-deleted", "--show-overwritten", "-p", pathToBucket}; + + execute(ozoneAdminShell, args); + String cmdRes = getStdOut(); + + // Verify that key is hsync'ed + assertTrue(cmdRes.contains("\tYes\t\tNo\t\tNo"), "key should be hsync'ed and not deleted, not overwritten"); + + execute(ozoneAdminShell, new String[] {"om", "lof", "--service-id", + omServiceId, "--show-overwritten", "-p", pathToBucket}); + cmdRes = getStdOut(); + // Verify that key is hsync'ed + assertTrue(cmdRes.contains("\tYes\t\tNo"), "key should be hsync'ed and not overwritten"); + + // Verify json output + String[] args1 = new String[] {"om", "lof", "--service-id", omServiceId, "--show-deleted", "--show-overwritten", + "--json", "-p", pathToBucket}; + 
execute(ozoneAdminShell, args1); + cmdRes = getStdOut(); + + assertTrue(!cmdRes.contains(OzoneConsts.DELETED_HSYNC_KEY), + "key should not have deletedHsyncKey metadata"); + assertTrue(!cmdRes.contains(OzoneConsts.OVERWRITTEN_HSYNC_KEY), + "key should not have overwrittenHsyncKey metadata"); + + // Suspend open key cleanup service so that key remains in openKeyTable for verification + OpenKeyCleanupService openKeyCleanupService = + (OpenKeyCleanupService) cluster.getOzoneManager().getKeyManager().getOpenKeyCleanupService(); + openKeyCleanupService.suspend(); + // overwrite last key + try (FSDataOutputStream os = fs.create(new Path(keys[numKeys - 1]))) { + os.write(2); + } + + GenericTestUtils.waitFor(() -> { + try { + execute(ozoneAdminShell, args); + String cmdRes1 = getStdOut(); + // When hsync file is overwritten, it should add OVERWRITTEN_HSYNC_KEY metadata in hsync openKey + // And list open key should show as overwritten + return cmdRes1.contains("\tYes\t\tNo\t\tYes"); + } catch (Throwable t) { + LOG.warn("Failed to list open key", t); + return false; + } + }, 1000, 10000); + + // Now check json output + execute(ozoneAdminShell, args1); + cmdRes = getStdOut(); + assertTrue(!cmdRes.contains(OzoneConsts.DELETED_HSYNC_KEY), + "key should not have deletedHsyncKey metadata"); + assertTrue(cmdRes.contains(OzoneConsts.OVERWRITTEN_HSYNC_KEY), + "key should have overwrittenHsyncKey metadata"); + + // Verify result should not have overwritten hsync keys when --show-overwritten is not in the command argument + String[] args2 = new String[] {"om", "lof", "--service-id", omServiceId, "-p", pathToBucket}; + execute(ozoneAdminShell, args2); + cmdRes = getStdOut(); + // Verify that overwrittenHsyncKey is not in the result + assertTrue(!cmdRes.contains("\tYes\t\tYes"), "key should be hsync'ed and not overwritten"); + + // Verify with json result + args2 = new String[] {"om", "lof", "--service-id", omServiceId, "--json", "-p", pathToBucket}; + execute(ozoneAdminShell, args2); + cmdRes = getStdOut(); + // Verify that overwrittenHsyncKey is not in the result + assertTrue(!cmdRes.contains(OzoneConsts.OVERWRITTEN_HSYNC_KEY), + "key should not have overwrittenHsyncKey metadata"); + + } finally { + // Cleanup + IOUtils.closeQuietly(streams); + } + } + /** * Return stdout as a String, then clears existing output. 
*/ @@ -884,6 +1136,34 @@ public void testLinkBucketOrphan() throws Exception { } } + @Test + @Timeout(10) + public void testListBucket() throws Exception { + final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; + OzoneConfiguration clientConf = + getClientConfForOFS(hostPrefix, cluster.getConf()); + int pageSize = 20; + clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize); + URI uri = FileSystem.getDefaultUri(clientConf); + clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); + OzoneFsShell shell = new OzoneFsShell(clientConf); + + String volName = "testlistbucket"; + int numBuckets = pageSize; + + try { + generateBuckets("/" + volName, numBuckets); + out.reset(); + int res = ToolRunner.run(shell, new String[]{"-ls", "/" + volName}); + assertEquals(0, res); + String r = out.toString(DEFAULT_ENCODING); + assertThat(r).matches("(?s)^Found " + numBuckets + " items.*"); + + } finally { + shell.close(); + } + } + @Test public void testDeleteTrashNoSkipTrash() throws Exception { @@ -1774,7 +2054,7 @@ private void getVolume(String volumeName) { } public void testListVolumeBucketKeyShouldPrintValidJsonArray() - throws UnsupportedEncodingException { + throws IOException { final List testVolumes = Arrays.asList("jsontest-vol1", "jsontest-vol2", "jsontest-vol3"); @@ -1799,7 +2079,7 @@ public void testListVolumeBucketKeyShouldPrintValidJsonArray() execute(ozoneShell, new String[] {"volume", "list"}); // Expect valid JSON array - final ArrayList> volumeListOut = + final List> volumeListOut = parseOutputIntoArrayList(); // Can include s3v and volumes from other test cases that aren't cleaned up, // hence >= instead of equals. @@ -1814,7 +2094,7 @@ public void testListVolumeBucketKeyShouldPrintValidJsonArray() execute(ozoneShell, new String[] {"bucket", "list", firstVolumePrefix}); // Expect valid JSON array as well - final ArrayList> bucketListOut = + final List> bucketListOut = parseOutputIntoArrayList(); assertEquals(testBuckets.size(), bucketListOut.size()); final HashSet bucketSet = new HashSet<>(testBuckets); @@ -1827,7 +2107,7 @@ public void testListVolumeBucketKeyShouldPrintValidJsonArray() execute(ozoneShell, new String[] {"key", "list", keyPathPrefix}); // Expect valid JSON array as well - final ArrayList> keyListOut = + final List> keyListOut = parseOutputIntoArrayList(); assertEquals(testKeys.size(), keyListOut.size()); final HashSet keySet = new HashSet<>(testKeys); @@ -1975,9 +2255,10 @@ public void testVolumeListKeys() OMException exception = (OMException) execution.getCause(); assertEquals(VOLUME_NOT_FOUND, exception.getResult()); } - - @Test - public void testRecursiveVolumeDelete() + + @ParameterizedTest + @ValueSource(ints = {1, 5}) + public void testRecursiveVolumeDelete(int threadCount) throws Exception { String volume1 = "volume10"; String volume2 = "volume20"; @@ -1986,47 +2267,19 @@ public void testRecursiveVolumeDelete() // Create bucket bucket1 with layout FILE_SYSTEM_OPTIMIZED // Insert some keys into it generateKeys(OZONE_URI_DELIMITER + volume1, - "/bucketfso", + "/fsobucket1", BucketLayout.FILE_SYSTEM_OPTIMIZED.toString()); - // Create another volume volume2 with bucket and some keys into it. + // Create another volume volume2 with bucket and some keys into it. 
generateKeys(OZONE_URI_DELIMITER + volume2, "/bucket2", BucketLayout.FILE_SYSTEM_OPTIMIZED.toString()); - // Create OBS bucket in volume1 - String[] args = new String[] {"bucket", "create", "--layout", - BucketLayout.OBJECT_STORE.toString(), volume1 + "/bucketobs"}; - execute(ozoneShell, args); - out.reset(); - - // Insert few keys into OBS bucket - String keyName = OZONE_URI_DELIMITER + volume1 + "/bucketobs" + - OZONE_URI_DELIMITER + "key"; - for (int i = 0; i < 5; i++) { - args = new String[] { - "key", "put", "o3://" + omServiceId + keyName + i, - testFile.getPath()}; - execute(ozoneShell, args); - } - out.reset(); - - // Create Legacy bucket in volume1 - args = new String[] {"bucket", "create", "--layout", - BucketLayout.LEGACY.toString(), volume1 + "/bucketlegacy"}; - execute(ozoneShell, args); - out.reset(); - - // Insert few keys into legacy bucket - keyName = OZONE_URI_DELIMITER + volume1 + "/bucketlegacy" + - OZONE_URI_DELIMITER + "key"; - for (int i = 0; i < 5; i++) { - args = new String[] { - "key", "put", "o3://" + omServiceId + keyName + i, - testFile.getPath()}; - execute(ozoneShell, args); - } - out.reset(); + createBucketAndGenerateKeys(volume1, FILE_SYSTEM_OPTIMIZED, "fsobucket2"); + createBucketAndGenerateKeys(volume1, OBJECT_STORE, "obsbucket1"); + createBucketAndGenerateKeys(volume1, OBJECT_STORE, "obsbucket2"); + createBucketAndGenerateKeys(volume1, LEGACY, "legacybucket1"); + createBucketAndGenerateKeys(volume1, LEGACY, "legacybucket2"); // Try volume delete without recursive // It should fail as volume is not empty @@ -2041,22 +2294,50 @@ public void testRecursiveVolumeDelete() assertEquals(client.getObjectStore().getVolume(volume1) .getName(), volume1); - // Delete volume1(containing OBS, FSO and Legacy buckets) recursively - args = - new String[] {"volume", "delete", volume1, "-r", "--yes"}; + // Delete volume1(containing OBS, FSO and Legacy buckets) recursively with thread count + String[] args = new String[] {"volume", "delete", volume1, "-r", "--yes", "-t", String.valueOf(threadCount)}; execute(ozoneShell, args); out.reset(); + // volume1 should not exist + omExecution = assertThrows(OMException.class, + () -> client.getObjectStore().getVolume(volume1)); + assertEquals(VOLUME_NOT_FOUND, omExecution.getResult()); + // volume2 should still exist assertEquals(client.getObjectStore().getVolume(volume2) .getName(), volume2); - // volume1 should not exist + // Delete volume2 recursively + args = new String[] {"volume", "delete", volume2, "-r", "--yes"}; + execute(ozoneShell, args); + out.reset(); + + // volume2 should not exist omExecution = assertThrows(OMException.class, - () -> client.getObjectStore().getVolume(volume1)); + () -> client.getObjectStore().getVolume(volume2)); assertEquals(VOLUME_NOT_FOUND, omExecution.getResult()); } + private void createBucketAndGenerateKeys(String volume, BucketLayout layout, String bucketName) { + // Create bucket + String[] args = new String[] {"bucket", "create", volume + "/" + bucketName, + "--layout", layout.toString()}; + execute(ozoneShell, args); + out.reset(); + + // Insert keys + String keyName = OZONE_URI_DELIMITER + volume + "/" + bucketName + + OZONE_URI_DELIMITER + "key"; + for (int i = 0; i < 5; i++) { + args = new String[] { + "key", "put", "o3://" + omServiceId + keyName + i, + testFile.getPath()}; + execute(ozoneShell, args); + } + out.reset(); + } + @Test public void testLinkedAndNonLinkedBucketMetaData() throws Exception { @@ -2079,7 +2360,7 @@ public void testLinkedAndNonLinkedBucketMetaData() 
execute(ozoneShell, new String[] {"bucket", "list", "/volume1"}); // Expect valid JSON array - final ArrayList> bucketListOut = + final List> bucketListOut = parseOutputIntoArrayList(); assertEquals(1, bucketListOut.size()); @@ -2098,7 +2379,7 @@ public void testLinkedAndNonLinkedBucketMetaData() execute(ozoneShell, new String[] {"bucket", "list", "/volume1"}); // Expect valid JSON array - final ArrayList> bucketListLinked = + final List> bucketListLinked = parseOutputIntoArrayList(); assertEquals(2, bucketListLinked.size()); @@ -2117,6 +2398,43 @@ public void testLinkedAndNonLinkedBucketMetaData() out.reset(); } + @Test + public void testKeyDeleteLegacyWithEnableFileSystemPath() throws IOException { + String volumeName = "vol5"; + String bucketName = "legacybucket"; + String[] args = new String[] {"volume", "create", "o3://" + omServiceId + OZONE_URI_DELIMITER + volumeName}; + execute(ozoneShell, args); + + args = new String[] {"bucket", "create", "o3://" + omServiceId + OZONE_URI_DELIMITER + + volumeName + OZONE_URI_DELIMITER + bucketName, "--layout", BucketLayout.LEGACY.toString()}; + execute(ozoneShell, args); + + String dirPath = OZONE_URI_DELIMITER + volumeName + OZONE_URI_DELIMITER + + bucketName + OZONE_URI_DELIMITER + "dir/"; + String keyPath = dirPath + "key1"; + + // Create key, it will generate two keys, one with dirPath other with keyPath + args = new String[] {"key", "put", "o3://" + omServiceId + keyPath, testFile.getPath()}; + execute(ozoneShell, args); + + // Enable fileSystem path for client config + String fileSystemEnable = generateSetConfString(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + // Delete dirPath key, it should fail + args = new String[] {fileSystemEnable, "key", "delete", dirPath}; + execute(ozoneShell, args); + + // Check number of keys + OzoneVolume volume = client.getObjectStore().getVolume(volumeName); + OzoneBucket bucket = volume.getBucket(bucketName); + List files = bucket.listStatus("", true, "", 5); + // Two keys should still exist, dirPath and keyPath + assertEquals(2, files.size()); + + // cleanup + args = new String[] {"volume", "delete", volumeName, "-r", "--yes"}; + execute(ozoneShell, args); + } + private static String getKeyProviderURI(MiniKMS kms) { return KMSClientProvider.SCHEME_NAME + "://" + kms.getKMSUrl().toExternalForm().replace("://", "@"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index 6abfbed2bd38..5d6475071419 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -96,8 +96,7 @@ public class TestOzoneTenantShell { private static final File AUDIT_LOG_FILE = new File("audit.log"); private static OzoneConfiguration conf = null; - private static MiniOzoneCluster cluster = null; - private static MiniOzoneHAClusterImpl haCluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static OzoneShell ozoneSh = null; private static TenantShell tenantShell = null; @@ -153,12 +152,11 @@ public static void init() throws Exception { // Init cluster omServiceId = "om-service-test1"; numOfOMs = 3; - cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + 
builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .withoutDatanodes() // Remove this once we are actually writing data - .build(); - haCluster = (MiniOzoneHAClusterImpl) cluster; + .withoutDatanodes(); // Remove this once we are actually writing data + cluster = builder.build(); cluster.waitForClusterToBeReady(); } @@ -377,17 +375,17 @@ public void testAssignAdmin() throws IOException { executeHA(tenantShell, new String[] {"--verbose", "user", "assign-admin", tenantName + "$" + userName, "--tenant=" + tenantName, "--delegated=true"}); - checkOutput(out, "{\n" + " \"accessId\": \"devaa$alice\",\n" - + " \"tenantId\": \"devaa\",\n" + " \"isAdmin\": true,\n" - + " \"isDelegatedAdmin\": true\n" + "}\n", true, true); + checkOutput(out, "{\n" + " \"accessId\" : \"devaa$alice\",\n" + + " \"tenantId\" : \"devaa\",\n" + " \"isAdmin\" : true,\n" + + " \"isDelegatedAdmin\" : true\n" + "}\n", true, true); checkOutput(err, "", true); // Clean up executeHA(tenantShell, new String[] {"--verbose", "user", "revoke-admin", tenantName + "$" + userName, "--tenant=" + tenantName}); - checkOutput(out, "{\n" + " \"accessId\": \"devaa$alice\",\n" - + " \"tenantId\": \"devaa\",\n" + " \"isAdmin\": false,\n" - + " \"isDelegatedAdmin\": false\n" + "}\n", true, true); + checkOutput(out, "{\n" + " \"accessId\" : \"devaa$alice\",\n" + + " \"tenantId\" : \"devaa\",\n" + " \"isAdmin\" : false,\n" + + " \"isDelegatedAdmin\" : false\n" + "}\n", true, true); checkOutput(err, "", true); executeHA(tenantShell, new String[] { @@ -460,7 +458,7 @@ public void testOzoneTenantBasicOperations() throws IOException { executeHA(tenantShell, new String[] {"list", "--json"}); // Not checking the full output here - checkOutput(out, "\"tenantId\": \"dev\",", false); + checkOutput(out, "\"tenantId\" : \"dev\",", false); checkOutput(err, "", true); // Attempt user getsecret before assignment, should fail @@ -529,16 +527,26 @@ public void testOzoneTenantBasicOperations() throws IOException { executeHA(tenantShell, new String[] { "user", "info", "--json", "bob"}); - checkOutput(out, "{\n" + " \"user\": \"bob\",\n" + " \"tenants\": [\n" - + " {\n" + " \"accessId\": \"research$bob\",\n" - + " \"tenantId\": \"research\",\n" + " \"isAdmin\": false,\n" - + " \"isDelegatedAdmin\": false\n" + " },\n" + " {\n" - + " \"accessId\": \"finance$bob\",\n" - + " \"tenantId\": \"finance\",\n" + " \"isAdmin\": false,\n" - + " \"isDelegatedAdmin\": false\n" + " },\n" + " {\n" - + " \"accessId\": \"dev$bob\",\n" - + " \"tenantId\": \"dev\",\n" + " \"isAdmin\": true,\n" - + " \"isDelegatedAdmin\": true\n" + " }\n" + " ]\n" + "}\n", + checkOutput(out, + "{\n" + + " \"user\" : \"bob\",\n" + + " \"tenants\" : [ {\n" + + " \"accessId\" : \"research$bob\",\n" + + " \"tenantId\" : \"research\",\n" + + " \"isAdmin\" : false,\n" + + " \"isDelegatedAdmin\" : false\n" + + " }, {\n" + + " \"accessId\" : \"finance$bob\",\n" + + " \"tenantId\" : \"finance\",\n" + + " \"isAdmin\" : false,\n" + + " \"isDelegatedAdmin\" : false\n" + + " }, {\n" + + " \"accessId\" : \"dev$bob\",\n" + + " \"tenantId\" : \"dev\",\n" + + " \"isAdmin\" : true,\n" + + " \"isDelegatedAdmin\" : true\n" + + " } ]\n" + + "}\n", true, true); checkOutput(err, "", true); @@ -641,7 +649,7 @@ public void testOzoneTenantBasicOperations() throws IOException { // Because InMemoryMultiTenantAccessController is used in OMs for this // integration test, we need to trigger BG sync on all OMs just // in case a leader changed right after the last operation. 
- haCluster.getOzoneManagersList().forEach(om -> om.getMultiTenantManager() + cluster.getOzoneManagersList().forEach(om -> om.getMultiTenantManager() .getOMRangerBGSyncService().triggerRangerSyncOnce()); // Delete dev volume should fail because the volume reference count > 0L @@ -664,8 +672,8 @@ public void testOzoneTenantBasicOperations() throws IOException { // Then delete tenant, should succeed executeHA(tenantShell, new String[] {"--verbose", "delete", "dev"}); - checkOutput(out, "{\n" + " \"tenantId\": \"dev\",\n" - + " \"volumeName\": \"dev\",\n" + " \"volumeRefCount\": 0\n" + "}\n", + checkOutput(out, "{\n" + " \"tenantId\" : \"dev\",\n" + + " \"volumeName\" : \"dev\",\n" + " \"volumeRefCount\" : 0\n" + "}\n", true, true); checkOutput(err, "Deleted tenant 'dev'.\n", false); deleteVolume("dev"); @@ -680,7 +688,7 @@ public void testOzoneTenantBasicOperations() throws IOException { public void testListTenantUsers() throws IOException { executeHA(tenantShell, new String[] {"--verbose", "create", "tenant1"}); checkOutput(out, "{\n" + - " \"tenantId\": \"tenant1\"\n" + "}\n", true, true); + " \"tenantId\" : \"tenant1\"\n" + "}\n", true, true); checkOutput(err, "", true); executeHA(tenantShell, new String[] { @@ -704,10 +712,14 @@ public void testListTenantUsers() throws IOException { executeHA(tenantShell, new String[] { "user", "list", "tenant1", "--json"}); - checkOutput(out, "[\n" + " {\n" + " \"user\": \"bob\",\n" - + " \"accessId\": \"tenant1$bob\"\n" + " },\n" + " {\n" - + " \"user\": \"alice\",\n" + " \"accessId\": \"tenant1$alice\"\n" - + " }\n" + "]\n", true); + checkOutput(out, + "[ {\n" + + " \"user\" : \"bob\",\n" + + " \"accessId\" : \"tenant1$bob\"\n" + + "}, {\n" + + " \"user\" : \"alice\",\n" + + " \"accessId\" : \"tenant1$alice\"\n" + + "} ]\n", true); checkOutput(err, "", true); executeHA(tenantShell, new String[] { @@ -718,8 +730,10 @@ public void testListTenantUsers() throws IOException { executeHA(tenantShell, new String[] { "user", "list", "tenant1", "--prefix=b", "--json"}); - checkOutput(out, "[\n" + " {\n" + " \"user\": \"bob\",\n" - + " \"accessId\": \"tenant1$bob\"\n" + " }\n" + "]\n", true); + checkOutput(out, "[ {\n" + + " \"user\" : \"bob\",\n" + + " \"accessId\" : \"tenant1$bob\"\n" + + "} ]\n", true); checkOutput(err, "", true); int exitCode = executeHA(tenantShell, new String[] { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java index 427b36d9a952..97a43c248a14 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java @@ -67,7 +67,7 @@ public class TestReconfigShell { public static void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String omServiceId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .setNumOfStorageContainerManagers(1) @@ -95,7 +95,7 @@ public void testDataNodeGetReconfigurableProperties() throws Exception { HddsDatanodeClientProtocolServer server = datanodeService.getClientProtocolServer(); InetSocketAddress socket = server.getClientRpcAddress(); - executeAndAssertProperties(datanodeService.getReconfigurationHandler(), + 
executeAndAssertProperties(datanodeService.getReconfigurationHandler(), "--service=DATANODE", socket, capture); } } @@ -105,7 +105,7 @@ public void testDataNodeGetReconfigurableProperties() throws Exception { public void testOzoneManagerGetReconfigurationProperties() throws Exception { try (SystemOutCapturer capture = new SystemOutCapturer()) { InetSocketAddress socket = ozoneManager.getOmRpcServerAddr(); - executeAndAssertProperties(ozoneManager.getReconfigurationHandler(), + executeAndAssertProperties(ozoneManager.getReconfigurationHandler(), "--service=OM", socket, capture); } } @@ -116,17 +116,17 @@ public void testStorageContainerManagerGetReconfigurationProperties() try (SystemOutCapturer capture = new SystemOutCapturer()) { InetSocketAddress socket = storageContainerManager.getClientRpcAddress(); executeAndAssertProperties( - storageContainerManager.getReconfigurationHandler(), socket, capture); + storageContainerManager.getReconfigurationHandler(), "--service=SCM", socket, capture); } } private void executeAndAssertProperties( - ReconfigurableBase reconfigurableBase, + ReconfigurableBase reconfigurableBase, String service, InetSocketAddress socket, SystemOutCapturer capture) throws UnsupportedEncodingException { String address = socket.getHostString() + ":" + socket.getPort(); ozoneAdmin.execute( - new String[] {"reconfig", "--address", address, "properties"}); + new String[] {"reconfig", service, "--address", address, "properties"}); assertReconfigurablePropertiesOutput( reconfigurableBase.getReconfigurableProperties(), capture.getOutput()); } @@ -171,7 +171,7 @@ private void executeAndAssertBulkReconfigCount(int except) throws Exception { try (SystemOutCapturer capture = new SystemOutCapturer()) { ozoneAdmin.execute(new String[] { - "reconfig", "--in-service-datanodes", "properties"}); + "reconfig", "--service=DATANODE", "--in-service-datanodes", "properties"}); String output = capture.getOutput(); assertThat(capture.getOutput()).contains(String.format("successfully %d", except)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 2e1b7a78736f..c1d55accfd70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -45,13 +45,13 @@ public static void init() throws Exception { // Init HA cluster omServiceId = "om-service-test1"; numOfOMs = 3; - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); conf.setQuietMode(false); // enable ratis for Scm. 
- conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java index 62d50708c83a..d3d7c7766e7b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java @@ -66,7 +66,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs) .setNumOfStorageContainerManagers(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index 8985af2ac56a..71f1b682d0f4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -55,6 +55,7 @@ import org.apache.hadoop.tools.util.DistCpTestUtils; import org.apache.hadoop.util.functional.RemoteIterators; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -184,6 +185,7 @@ public void setup() throws Exception { remoteFS.delete(remoteDir, true); } + @AfterEach @Override public void teardown() throws Exception { // if remote FS supports IOStatistics log it. 
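Note: the test updates above (TestOzoneShellHA, TestOzoneTenantShell, TestReconfigShell, TestScmAdminHA, TestTransferLeadershipShell) all converge on the same typed HA cluster setup. The sketch below isolates that pattern; it is a minimal illustration assuming only the MiniOzoneCluster and MiniOzoneHAClusterImpl builder calls visible in these hunks, and the helper class name plus the literal service id and node counts are hypothetical.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;

/** Hypothetical helper, not part of the patch. */
public final class HaClusterSetupSketch {

  private HaClusterSetupSketch() { }

  // Builds and starts an HA mini cluster the way the updated tests do:
  // newHABuilder() hands back a typed MiniOzoneHAClusterImpl.Builder, so the
  // (MiniOzoneHAClusterImpl) cast that the older tests needed goes away.
  public static MiniOzoneHAClusterImpl start(String omServiceId, int numOMs, int numDNs)
      throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf);
    builder.setOMServiceId(omServiceId)
        .setNumOfOzoneManagers(numOMs)
        .setNumDatanodes(numDNs);
    MiniOzoneHAClusterImpl cluster = builder.build();
    cluster.waitForClusterToBeReady();
    return cluster;
  }
}

Because build() already returns the HA implementation, callers such as TestTransferLeadershipShell above can assign the result directly instead of casting the generic MiniOzoneCluster.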
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/FaultInjectorImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/FaultInjectorImpl.java index 8656811fa87c..0be075e8656a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/FaultInjectorImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/FaultInjectorImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.utils; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.utils.FaultInjector; import org.assertj.core.api.Fail; import org.junit.jupiter.api.Assertions; @@ -32,6 +33,7 @@ public class FaultInjectorImpl extends FaultInjector { private CountDownLatch ready; private CountDownLatch wait; private Throwable ex; + private ContainerProtos.Type type = null; public FaultInjectorImpl() { init(); @@ -79,5 +81,15 @@ public void setException(Throwable e) { public Throwable getException() { return ex; } + + @VisibleForTesting + public void setType(ContainerProtos.Type type) { + this.type = type; + } + + @VisibleForTesting + public ContainerProtos.Type getType() { + return type; + } } diff --git a/hadoop-ozone/integration-test/src/test/resources/core-site.xml b/hadoop-ozone/integration-test/src/test/resources/core-site.xml index 77dd7ef99402..07768a939e41 100644 --- a/hadoop-ozone/integration-test/src/test/resources/core-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/core-site.xml @@ -20,5 +20,16 @@ - + + hadoop.proxyuser.proxyuser.users + * + + + hadoop.proxyuser.proxyuser.groups + * + + + hadoop.proxyuser.proxyuser.hosts + * + diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 4e79ae97fc24..779ed2b785cb 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -37,7 +37,7 @@ - dfs.container.ratis.num.write.chunk.threads.per.volume + hdds.container.ratis.num.write.chunk.threads.per.volume 4 @@ -52,10 +52,25 @@ - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled true + + + hdds.heartbeat.interval + 1s + + + ozone.scm.heartbeat.thread.interval + 100ms + + + + ozone.scm.ratis.pipeline.limit + 3 + + ozone.scm.close.container.wait.duration 1s @@ -67,7 +82,8 @@ - dfs.container.ratis.log.appender.queue.byte-limit + hdds.container.ratis.log.appender.queue.byte-limit + 8MB diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 2c1e03ce3f86..b92de2f5bc1e 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -44,6 +44,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> io.grpc grpc-protobuf + + + com.google.code.findbugs + jsr305 + + io.grpc diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 9cafd9b31b85..be6a1e179785 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -146,6 +146,7 @@ enum Type { SetSnapshotProperty = 128; ListStatusLight = 129; GetSnapshotInfo = 130; + RenameSnapshot = 131; ListOpenFiles = 132; } @@ -283,8 +284,9 @@ message OMRequest { optional MultipartUploadsExpiredAbortRequest 
multipartUploadsExpiredAbortRequest = 126; optional SetSnapshotPropertyRequest SetSnapshotPropertyRequest = 127; optional SnapshotInfoRequest SnapshotInfoRequest = 128; + optional RenameSnapshotRequest RenameSnapshotRequest = 129; - optional ListOpenFilesRequest ListOpenFilesRequest = 132; + optional ListOpenFilesRequest ListOpenFilesRequest = 130; } message OMResponse { @@ -407,8 +409,9 @@ message OMResponse { optional ListStatusLightResponse listStatusLightResponse = 129; optional SnapshotInfoResponse SnapshotInfoResponse = 130; optional OMLockDetailsProto omLockDetails = 131; + optional RenameSnapshotResponse RenameSnapshotResponse = 132; - optional ListOpenFilesResponse ListOpenFilesResponse = 132; + optional ListOpenFilesResponse ListOpenFilesResponse = 133; } enum Status { @@ -1033,6 +1036,10 @@ message KeyArgs { optional hadoop.hdds.ECReplicationConfig ecReplicationConfig = 19; // Force OM to update container cache location from SCL optional bool forceUpdateContainerCacheFromSCM = 20; + optional string ownerName = 21; + + // S3 object tags support + repeated hadoop.hdds.KeyValue tags = 22; } message KeyLocation { @@ -1115,6 +1122,8 @@ message KeyInfo { optional hadoop.hdds.ECReplicationConfig ecReplicationConfig = 17; optional FileChecksumProto fileChecksum = 18; optional bool isFile = 19; + optional string ownerName = 20; + repeated hadoop.hdds.KeyValue tags = 21; } message BasicKeyInfo { @@ -1125,6 +1134,8 @@ message BasicKeyInfo { optional hadoop.hdds.ReplicationType type = 5; optional hadoop.hdds.ReplicationFactor factor = 6; optional hadoop.hdds.ECReplicationConfig ecReplicationConfig = 7; + optional string eTag = 8; + optional string ownerName = 9; } message DirectoryInfo { @@ -1136,6 +1147,7 @@ message DirectoryInfo { required uint64 objectID = 6; required uint64 updateID = 7; required uint64 parentID = 8; + optional string ownerName = 9; } message RepeatedKeyInfo { @@ -1614,8 +1626,9 @@ message MultipartCommitUploadPartRequest { } message MultipartCommitUploadPartResponse { - // This one is returned as Etag for S3. optional string partName = 1; + // This one is returned as Etag for S3. 
+ optional string eTag = 2; } message MultipartUploadCompleteRequest { @@ -1633,6 +1646,7 @@ message MultipartUploadCompleteResponse { message Part { required uint32 partNumber = 1; required string partName = 2; + optional string eTag = 3; } message MultipartUploadAbortRequest { @@ -1705,6 +1719,7 @@ message PartInfo { required string partName = 2; required uint64 modificationTime = 3; required uint64 size = 4; + optional string eTag = 5; } /** @@ -1857,6 +1872,14 @@ message CreateSnapshotRequest { optional uint64 creationTime = 5; } +message RenameSnapshotRequest { + optional string volumeName = 1; + optional string bucketName = 2; + optional string snapshotOldName = 3; + optional string snapshotNewName = 4; + optional uint64 renameTime = 5; +} + message ListSnapshotRequest { optional string volumeName = 1; optional string bucketName = 2; @@ -2019,6 +2042,10 @@ message DeleteSnapshotResponse { } +message RenameSnapshotResponse { + optional SnapshotInfo snapshotInfo = 1; +} + message SnapshotInfoResponse { optional SnapshotInfo snapshotInfo = 1; } @@ -2117,6 +2144,7 @@ message RecoverLeaseRequest { message RecoverLeaseResponse { optional bool response = 1 [deprecated=true]; optional KeyInfo keyInfo = 2; + optional bool isKeyInfo = 3 [default = true]; } message SetTimesRequest { diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index 4cc76868f745..30fe6d69b765 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -20,25 +20,25 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.hdds.utils.db.CopyObject; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedPrefixInfo; -import java.util.BitSet; -import java.util.HashMap; +import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; +import java.util.concurrent.CopyOnWriteArrayList; /** * Wrapper class for Ozone prefix path info, currently mainly target for ACL but * can be extended for other OzFS optimizations in future. */ // TODO: support Auditable interface -public final class OmPrefixInfo extends WithObjectID { +public final class OmPrefixInfo extends WithObjectID implements CopyObject { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(PersistedPrefixInfo.getDefaultInstance()), OmPrefixInfo::getFromProtobuf, @@ -48,16 +48,13 @@ public static Codec getCodec() { return CODEC; } - private String name; - private List acls; + private final String name; + private final CopyOnWriteArrayList acls; - public OmPrefixInfo(String name, List acls, - Map metadata, long objectId, long updateId) { - this.name = name; - this.acls = acls; - this.metadata = metadata; - this.objectID = objectId; - this.updateID = updateId; + private OmPrefixInfo(Builder b) { + super(b); + name = b.name; + acls = new CopyOnWriteArrayList<>(b.acls); } /** @@ -100,17 +97,19 @@ public static OmPrefixInfo.Builder newBuilder() { /** * Builder for OmPrefixInfo. 
*/ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String name; - private List acls; - private Map metadata; - private long objectID; - private long updateID; + private final List acls; public Builder() { //Default values this.acls = new LinkedList<>(); - this.metadata = new HashMap<>(); + } + + public Builder(OmPrefixInfo obj) { + super(obj); + setName(obj.name); + acls = new ArrayList<>(obj.getAcls()); } public Builder setAcls(List listOfAcls) { @@ -125,26 +124,28 @@ public Builder setName(String n) { return this; } + @Override public OmPrefixInfo.Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public OmPrefixInfo.Builder addAllMetadata( Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -154,7 +155,7 @@ public Builder setUpdateID(long id) { */ public OmPrefixInfo build() { Preconditions.checkNotNull(name); - return new OmPrefixInfo(name, acls, metadata, objectID, updateID); + return new OmPrefixInfo(this); } } @@ -164,9 +165,9 @@ public OmPrefixInfo build() { public PersistedPrefixInfo getProtobuf() { PersistedPrefixInfo.Builder pib = PersistedPrefixInfo.newBuilder().setName(name) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) - .setObjectID(objectID) - .setUpdateID(updateID); + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()); if (acls != null) { pib.addAllAcls(OzoneAclStorageUtil.toProtobuf(acls)); } @@ -210,14 +211,14 @@ public boolean equals(Object o) { OmPrefixInfo that = (OmPrefixInfo) o; return name.equals(that.name) && Objects.equals(acls, that.acls) && - Objects.equals(metadata, that.metadata) && - objectID == that.objectID && - updateID == that.updateID; + Objects.equals(getMetadata(), that.getMetadata()) && + getObjectID() == that.getObjectID() && + getUpdateID() == that.getUpdateID(); } @Override public int hashCode() { - return Objects.hash(name, acls, metadata, objectID, updateID); + return Objects.hash(name, acls, getMetadata(), getObjectID(), getUpdateID()); } @Override @@ -225,26 +226,19 @@ public String toString() { return "OmPrefixInfo{" + "name='" + name + '\'' + ", acls=" + acls + - ", metadata=" + metadata + - ", objectID=" + objectID + - ", updateID=" + updateID + + ", metadata=" + getMetadata() + + ", objectID=" + getObjectID() + + ", updateID=" + getUpdateID() + '}'; } - /** - * Return a new copy of the object. 
- */ + @Override public OmPrefixInfo copyObject() { - List aclList = acls.stream().map(acl -> - new OzoneAcl(acl.getType(), acl.getName(), - (BitSet) acl.getAclBitSet().clone(), acl.getAclScope())) - .collect(Collectors.toList()); + return toBuilder().build(); + } - Map metadataList = new HashMap<>(); - if (metadata != null) { - metadata.forEach((k, v) -> metadataList.put(k, v)); - } - return new OmPrefixInfo(name, aclList, metadataList, objectID, updateID); + public Builder toBuilder() { + return new Builder(this); } } diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java index abc6359efcaf..097b5da2fc09 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.ozone.om.helpers; -import com.google.protobuf.ByteString; import java.util.BitSet; +import java.util.EnumSet; +import java.util.List; +import java.util.stream.Collectors; + import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneAcl.AclScope; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.OzoneAclInfo; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.OzoneAclInfo.OzoneAclScope; @@ -49,15 +53,18 @@ public static OzoneAclInfo toProtobuf(OzoneAcl acl) { .setName(acl.getName()) .setType(OzoneAclType.valueOf(acl.getType().name())) .setAclScope(OzoneAclScope.valueOf(acl.getAclScope().name())) - .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray())); + .setRights(acl.getAclByteString()); return builder.build(); } public static OzoneAcl fromProtobuf(OzoneAclInfo protoAcl) { BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray()); + List aclTypeList = aclRights.stream() + .mapToObj(a -> IAccessAuthorizer.ACLType.values()[a]) + .collect(Collectors.toList()); + EnumSet aclSet = EnumSet.copyOf(aclTypeList); return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), - protoAcl.getName(), aclRights, - AclScope.valueOf(protoAcl.getAclScope().name())); + protoAcl.getName(), AclScope.valueOf(protoAcl.getAclScope().name()), aclSet); } } diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java index 5226f315c8be..3a57fa050d02 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java @@ -25,8 +25,8 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.junit.jupiter.api.Test; +import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -40,7 +40,7 @@ public class TestOmPrefixInfo { private static OzoneManagerStorageProtos.OzoneAclInfo buildTestOzoneAclInfo( String aclString) { OzoneAcl oacl = OzoneAcl.parseAcl(aclString); - ByteString rights = 
ByteString.copyFrom(oacl.getAclBitSet().toByteArray()); + final ByteString rights = oacl.getAclByteString(); return OzoneManagerStorageProtos.OzoneAclInfo.newBuilder() .setType(OzoneManagerStorageProtos.OzoneAclInfo.OzoneAclType.USER) .setName(oacl.getName()) @@ -73,10 +73,14 @@ private OmPrefixInfo getOmPrefixInfoForTest(String path, String identityString, IAccessAuthorizer.ACLType aclType, OzoneAcl.AclScope scope) { - return new OmPrefixInfo(path, - Collections.singletonList(new OzoneAcl( + return OmPrefixInfo.newBuilder() + .setName(path) + .setAcls(new ArrayList<>(Collections.singletonList(new OzoneAcl( identityType, identityString, - aclType, scope)), new HashMap<>(), 10, 100); + scope, aclType)))) + .setObjectID(10) + .setUpdateID(100) + .build(); } @@ -97,7 +101,7 @@ public void testCopyObject() { // Change acls and check. omPrefixInfo.addAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, username, - IAccessAuthorizer.ACLType.READ, ACCESS)); + ACCESS, IAccessAuthorizer.ACLType.READ)); assertNotEquals(omPrefixInfo, clonePrefixInfo); diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java index f3ad1d8c7628..4820b37e1ba7 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java @@ -45,7 +45,7 @@ public void testToAndFromPersistedFormat() throws IOException { List acls = new LinkedList<>(); OzoneAcl ozoneAcl = new OzoneAcl(ACLIdentityType.USER, - "hive", ACLType.ALL, ACCESS); + "hive", ACCESS, ACLType.ALL); acls.add(ozoneAcl); OmPrefixInfo opiSave = OmPrefixInfo.newBuilder() .setName("/user/hive/warehouse") diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index d076e12932d0..5af36fb2291d 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -231,11 +231,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - org.jmockit - jmockit - test - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index e641d132702d..54de09f3328b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -98,6 +98,7 @@ public enum OMAction implements AuditAction { CREATE_SNAPSHOT, LIST_SNAPSHOT, DELETE_SNAPSHOT, + RENAME_SNAPSHOT, SNAPSHOT_MOVE_DELETED_KEYS, SNAPSHOT_INFO, SET_TIMES, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 5bc894b2b922..68429c36d084 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -154,7 +154,8 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) && context.getAclRights() != ACLType.READ); if (bucketNeedResolved || - ozObject.getResourceType() == OzoneObj.ResourceType.KEY) { + ozObject.getResourceType() == OzoneObj.ResourceType.KEY || + 
ozObject.getResourceType() == OzoneObj.ResourceType.PREFIX) { try { ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java new file mode 100644 index 000000000000..d5916c6adc5d --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; + +import java.util.Iterator; +import java.util.Map.Entry; + +/** + * A class for collecting and reporting bucket utilization metrics. + *

+ * Available metrics:
+ * <ul>
+ *   <li>Bytes used in the bucket.</li>
+ *   <li>Bucket quota in bytes.</li>
+ *   <li>Bucket quota in namespace.</li>
+ *   <li>Bucket available space, calculated as the difference between the bucket quota and the
+ *   bytes used in the bucket. If the bucket quota is not set, this metric reports -1.</li>
+ * </ul>
    + */ +@InterfaceAudience.Private +@Metrics(about = "Ozone Bucket Utilization Metrics", context = OzoneConsts.OZONE) +public class BucketUtilizationMetrics implements MetricsSource { + + private static final String SOURCE = BucketUtilizationMetrics.class.getSimpleName(); + + private final OMMetadataManager metadataManager; + + public BucketUtilizationMetrics(OMMetadataManager metadataManager) { + this.metadataManager = metadataManager; + } + + public static BucketUtilizationMetrics create(OMMetadataManager metadataManager) { + MetricsSystem ms = DefaultMetricsSystem.instance(); + return ms.register(SOURCE, "Bucket Utilization Metrics", new BucketUtilizationMetrics(metadataManager)); + } + + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + Iterator, CacheValue>> bucketIterator = metadataManager.getBucketIterator(); + + while (bucketIterator.hasNext()) { + Entry, CacheValue> entry = bucketIterator.next(); + OmBucketInfo bucketInfo = entry.getValue().getCacheValue(); + if (bucketInfo == null) { + continue; + } + + long availableSpace; + long quotaInBytes = bucketInfo.getQuotaInBytes(); + if (quotaInBytes == -1) { + availableSpace = quotaInBytes; + } else { + availableSpace = Math.max(bucketInfo.getQuotaInBytes() - bucketInfo.getUsedBytes(), 0); + } + + collector.addRecord(SOURCE) + .setContext("Bucket metrics") + .tag(BucketMetricsInfo.VolumeName, bucketInfo.getVolumeName()) + .tag(BucketMetricsInfo.BucketName, bucketInfo.getBucketName()) + .addGauge(BucketMetricsInfo.BucketUsedBytes, bucketInfo.getUsedBytes()) + .addGauge(BucketMetricsInfo.BucketQuotaBytes, bucketInfo.getQuotaInBytes()) + .addGauge(BucketMetricsInfo.BucketQuotaNamespace, bucketInfo.getQuotaInNamespace()) + .addGauge(BucketMetricsInfo.BucketAvailableBytes, availableSpace); + } + } + + public void unRegister() { + MetricsSystem ms = DefaultMetricsSystem.instance(); + ms.unregisterSource(SOURCE); + } + + enum BucketMetricsInfo implements MetricsInfo { + VolumeName("Volume Metrics."), + BucketName("Bucket Metrics."), + BucketUsedBytes("Bytes used by bucket."), + BucketQuotaBytes("Bucket quote in bytes."), + BucketQuotaNamespace("Bucket quota in namespace."), + BucketAvailableBytes("Bucket available space."); + + private final String desc; + + BucketMetricsInfo(String desc) { + this.desc = desc; + } + + @Override + public String description() { + return desc; + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 590fe9ef2725..0e5e972561da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -32,6 +32,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.Stack; import java.util.TreeMap; @@ -44,10 +45,15 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.fs.FileEncryptionInfo; +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.utils.BackgroundService; @@ -56,6 +62,9 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.net.CachedDNSToSwitchMapping; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.net.TableMapping; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -94,6 +103,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; @@ -106,6 +116,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; +import static org.apache.hadoop.hdds.scm.net.NetConstants.NODE_COST_DEFAULT; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; @@ -121,6 +132,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT; @@ -187,6 +199,7 @@ public class KeyManagerImpl implements KeyManager { private BackgroundService openKeyCleanupService; private BackgroundService multipartUploadCleanupService; private SnapshotDirectoryCleaningService snapshotDirectoryCleaningService; + private DNSToSwitchMapping dnsToSwitchMapping; public KeyManagerImpl(OzoneManager om, ScmClient scmClient, OzoneConfiguration conf, OMPerformanceMetrics metrics) { @@ -336,6 +349,16 @@ public void start(OzoneConfiguration configuration) { ozoneManager, configuration); multipartUploadCleanupService.start(); } + + Class dnsToSwitchMappingClass = + configuration.getClass( + DFSConfigKeysLegacy.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + TableMapping.class, DNSToSwitchMapping.class); + DNSToSwitchMapping newInstance = ReflectionUtils.newInstance( + dnsToSwitchMappingClass, configuration); + dnsToSwitchMapping = + ((newInstance instanceof CachedDNSToSwitchMapping) ? 
newInstance + : new CachedDNSToSwitchMapping(newInstance)); } KeyProviderCryptoExtension getKMSProvider() { @@ -609,13 +632,17 @@ public ListKeysResult listKeys(String volumeName, String bucketName, int maxKeys) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - + OmBucketInfo omBucketInfo = getBucketInfo(volumeName, bucketName); + if (omBucketInfo == null) { + throw new OMException("Bucket " + bucketName + " not found.", + ResultCodes.BUCKET_NOT_FOUND); + } + BucketLayout bucketLayout = omBucketInfo.getBucketLayout(); // We don't take a lock in this path, since we walk the // underlying table using an iterator. That automatically creates a // snapshot of the data, so we don't need these locks at a higher level // when we iterate. - - if (enableFileSystemPaths) { + if (bucketLayout.shouldNormalizePaths(enableFileSystemPaths)) { startKey = OmUtils.normalizeKey(startKey, true); keyPrefix = OmUtils.normalizeKey(keyPrefix, true); } @@ -817,10 +844,19 @@ public OmMultipartUploadListParts listParts(String volumeName, if (nextPartNumberMarker > partNumberMarker) { String partName = getPartName(partKeyInfo, volumeName, bucketName, keyName); + // Before HDDS-9680, MPU part does not have eTag metadata, for + // this case, we return null. The S3G will handle this case by + // using the MPU part name as the eTag field instead. + Optional eTag = partKeyInfo.getPartKeyInfo() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(ETAG)) + .findFirst(); OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), - partKeyInfo.getPartKeyInfo().getDataSize()); + partKeyInfo.getPartKeyInfo().getDataSize(), + eTag.map(HddsProtos.KeyValue::getValue).orElse(null)); omPartInfoList.add(omPartInfo); //if there are parts, use replication type from one of the parts @@ -907,12 +943,6 @@ private String getPartName(PartKeyInfo partKeyInfo, String volName, return partName; } - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ @Override public List getAcl(OzoneObj obj) throws IOException { validateOzoneObj(obj); @@ -1404,6 +1434,7 @@ private OmKeyInfo createDirectoryKey(OmKeyInfo keyInfo, String keyName) .setReplicationConfig(keyInfo.getReplicationConfig()) .setFileEncryptionInfo(encInfo) .setAcls(keyInfo.getAcls()) + .setOwnerName(keyInfo.getOwnerName()) .build(); } /** @@ -1834,8 +1865,7 @@ private FileEncryptionInfo getFileEncryptionInfo(OmBucketInfo bucketInfo) return encInfo; } - @VisibleForTesting - void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { + private void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { if (keyInfos != null && clientMachine != null) { Map, List> sortedPipelines = new HashMap<>(); for (OmKeyInfo keyInfo : keyInfos) { @@ -1855,8 +1885,7 @@ void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { LOG.warn("No datanodes in pipeline {}", pipeline.getId()); continue; } - sortedNodes = sortDatanodes(clientMachine, nodes, keyInfo, - uuidList); + sortedNodes = sortDatanodes(nodes, clientMachine); if (sortedNodes != null) { sortedPipelines.put(uuidSet, sortedNodes); } @@ -1864,30 +1893,67 @@ void sortDatanodes(String clientMachine, OmKeyInfo... 
keyInfos) { LOG.debug("Found sorted datanodes for pipeline {} and client {} " + "in cache", pipeline.getId(), clientMachine); } - pipeline.setNodesInOrder(sortedNodes); + if (!Objects.equals(pipeline.getNodesInOrder(), sortedNodes)) { + k.setPipeline(pipeline.copyWithNodesInOrder(sortedNodes)); + } } } } } - private List sortDatanodes(String clientMachine, - List nodes, OmKeyInfo keyInfo, List nodeList) { - List sortedNodes = null; + @VisibleForTesting + public List sortDatanodes(List nodes, + String clientMachine) { + final Node client = getClientNode(clientMachine, nodes); + return ozoneManager.getClusterMap() + .sortByDistanceCost(client, nodes, nodes.size()); + } + + private Node getClientNode(String clientMachine, + List nodes) { + List matchingNodes = new ArrayList<>(); + boolean useHostname = ozoneManager.getConfiguration().getBoolean( + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME, + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); + for (DatanodeDetails node : nodes) { + if ((useHostname ? node.getHostName() : node.getIpAddress()).equals( + clientMachine)) { + matchingNodes.add(node); + } + } + return !matchingNodes.isEmpty() ? matchingNodes.get(0) : + getOtherNode(clientMachine); + } + + private Node getOtherNode(String clientMachine) { try { - sortedNodes = scmClient.getBlockClient() - .sortDatanodes(nodeList, clientMachine); - if (LOG.isDebugEnabled()) { - LOG.debug("Sorted datanodes {} for client {}, result: {}", nodes, - clientMachine, sortedNodes); + String clientLocation = resolveNodeLocation(clientMachine); + if (clientLocation != null) { + Node rack = ozoneManager.getClusterMap().getNode(clientLocation); + if (rack instanceof InnerNode) { + return new NodeImpl(clientMachine, clientLocation, + (InnerNode) rack, rack.getLevel() + 1, + NODE_COST_DEFAULT); + } } - } catch (IOException e) { - LOG.warn("Unable to sort datanodes based on distance to client, " - + " volume={}, bucket={}, key={}, client={}, datanodes={}, " - + " exception={}", - keyInfo.getVolumeName(), keyInfo.getBucketName(), - keyInfo.getKeyName(), clientMachine, nodeList, e.getMessage()); + } catch (Exception e) { + LOG.info("Could not resolve client {}: {}", + clientMachine, e.getMessage()); + } + return null; + } + + private String resolveNodeLocation(String hostname) { + List hosts = Collections.singletonList(hostname); + List resolvedHosts = dnsToSwitchMapping.resolve(hosts); + if (resolvedHosts != null && !resolvedHosts.isEmpty()) { + String location = resolvedHosts.get(0); + LOG.debug("Node {} resolved to location {}", hostname, location); + return location; + } else { + LOG.debug("Node resolution did not yield any result for {}", hostname); + return null; } - return sortedNodes; } private static List toNodeUuid(Collection nodes) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index 7981222c4c6a..86d8352697ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -342,7 +342,7 @@ public HeapEntry next() { } public void close() throws IOException { - iterators.forEach(IOUtils::closeQuietly); + IOUtils.closeQuietly(iterators); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index cc8acc483406..c8237b79673e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.utils.DBCheckpointServlet; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils; +import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; @@ -221,43 +222,24 @@ public static Map normalizeExcludeList( } /** - * Pauses rocksdb compaction threads while creating copies of - * compaction logs and hard links of sst backups. + * Copies compaction logs and hard links of sst backups to tmpDir. * @param tmpdir - Place to create copies/links * @param flush - Whether to flush the db or not. * @return Checkpoint containing snapshot entries expected. */ @Override - public DBCheckpoint getCheckpoint(Path tmpdir, boolean flush) - throws IOException { - DBCheckpoint checkpoint; - + public DBCheckpoint getCheckpoint(Path tmpdir, boolean flush) throws IOException { // make tmp directories to contain the copies RocksDBCheckpointDiffer differ = getDbStore().getRocksDBCheckpointDiffer(); - DirectoryData sstBackupDir = new DirectoryData(tmpdir, - differ.getSSTBackupDir()); - DirectoryData compactionLogDir = new DirectoryData(tmpdir, - differ.getCompactionLogDir()); - - long startTime = System.currentTimeMillis(); - long pauseCounter = PAUSE_COUNTER.incrementAndGet(); - - try { - LOG.info("Compaction pausing {} started.", pauseCounter); - // Pause compactions, Copy/link files and get checkpoint. - differ.incrementTarballRequestCount(); - FileUtils.copyDirectory(compactionLogDir.getOriginalDir(), - compactionLogDir.getTmpDir()); - OmSnapshotUtils.linkFiles(sstBackupDir.getOriginalDir(), - sstBackupDir.getTmpDir()); - checkpoint = getDbStore().getCheckpoint(flush); - } finally { - // Unpause the compaction threads. - differ.decrementTarballRequestCountAndNotify(); - long elapsedTime = System.currentTimeMillis() - startTime; - LOG.info("Compaction pausing {} ended. Elapsed ms: {}", pauseCounter, elapsedTime); - } - return checkpoint; + DirectoryData sstBackupDir = new DirectoryData(tmpdir, differ.getSSTBackupDir()); + DirectoryData compactionLogDir = new DirectoryData(tmpdir, differ.getCompactionLogDir()); + + // Create checkpoint and then copy the files so that it has all the compaction entries and files. + DBCheckpoint dbCheckpoint = getDbStore().getCheckpoint(flush); + FileUtils.copyDirectory(compactionLogDir.getOriginalDir(), compactionLogDir.getTmpDir()); + OmSnapshotUtils.linkFiles(sstBackupDir.getOriginalDir(), sstBackupDir.getTmpDir()); + + return dbCheckpoint; } @@ -317,8 +299,7 @@ private boolean getFilesForArchive(DBCheckpoint checkpoint, // Get the snapshot files. 
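Editor's note: the reworked OMDBCheckpointServlet#getCheckpoint above no longer pauses RocksDB compactions; it takes the checkpoint first and then copies the compaction logs and hard-links the SST backups into the tmp directory. The exact behavior of OmSnapshotUtils.linkFiles is not shown in this patch; the following is only a minimal, self-contained sketch of the hard-linking pattern with java.nio (directory names are illustrative).

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public final class HardLinkSketch {
  private HardLinkSketch() { }

  /** Recreate the directory layout of src under dst, hard-linking every regular file. */
  public static void linkFiles(Path src, Path dst) throws IOException {
    try (Stream<Path> files = Files.walk(src)) {
      files.forEach(p -> {
        Path target = dst.resolve(src.relativize(p));
        try {
          if (Files.isDirectory(p)) {
            Files.createDirectories(target);
          } else {
            // Hard link instead of copy: cheap, and the tarball still sees
            // a stable set of SST files even if the source keeps changing.
            Files.createLink(target, p);
          }
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      });
    }
  }

  public static void main(String[] args) throws IOException {
    linkFiles(Paths.get("/tmp/sstBackup"), Paths.get("/tmp/checkpoint/sstBackup"));
  }
}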
Set snapshotPaths = waitForSnapshotDirs(checkpoint); - Path snapshotDir = Paths.get(OMStorage.getOmDbDir(getConf()).toString(), - OM_SNAPSHOT_DIR); + Path snapshotDir = getSnapshotDir(); if (!processDir(snapshotDir, copyFiles, hardLinkFiles, sstFilesToExclude, snapshotPaths, excluded, copySize, null)) { return false; @@ -635,6 +616,15 @@ private OzoneConfiguration getConf() { .getConfiguration(); } + private Path getSnapshotDir() { + OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); + RDBStore store = (RDBStore) om.getMetadataManager().getStore(); + // store.getSnapshotsParentDir() returns path to checkpointState (e.g. /db.snapshots/checkpointState) + // But we need to return path till db.snapshots which contains checkpointState and diffState. + // So that whole snapshots and compaction information can be transferred to follower. + return Paths.get(store.getSnapshotsParentDir()).getParent(); + } + @Override public BootstrapStateHandler.Lock getBootstrapStateLock() { return lock; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 2fbbbe153040..1c0ec78cfb22 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -26,6 +26,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; /** * This class is for maintaining Ozone Manager statistics. @@ -74,7 +75,10 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSnapshotLists; private @Metric MutableCounterLong numSnapshotDiffJobs; private @Metric MutableCounterLong numSnapshotInfos; + private @Metric MutableCounterLong numSnapshotPurges; + private @Metric MutableCounterLong numSnapshotSetProperties; + private @Metric MutableGaugeInt numSnapshotCacheSize; private @Metric MutableCounterLong numGetFileStatus; private @Metric MutableCounterLong numCreateDirectory; private @Metric MutableCounterLong numCreateFile; @@ -137,6 +141,8 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSnapshotListFails; private @Metric MutableCounterLong numSnapshotDiffJobFails; private @Metric MutableCounterLong numSnapshotInfoFails; + private @Metric MutableCounterLong numSnapshotPurgeFails; + private @Metric MutableCounterLong numSnapshotSetPropertyFails; private @Metric MutableCounterLong numSnapshotActive; private @Metric MutableCounterLong numSnapshotDeleted; @@ -489,6 +495,14 @@ public void incNumSnapshotInfos() { numSnapshotInfos.incr(); } + public void incNumSnapshotPurges() { + numSnapshotPurges.incr(); + } + + public void incNumSnapshotSetProperties() { + numSnapshotSetProperties.incr(); + } + public void incNumSnapshotDiffJobs() { numSnapshotDiffJobs.incr(); } @@ -504,6 +518,15 @@ public void incNumSnapshotDiffJobFails() { public void incNumSnapshotInfoFails() { numSnapshotInfoFails.incr(); } + + public void incNumSnapshotPurgeFails() { + numSnapshotPurgeFails.incr(); + } + + public void incNumSnapshotSetPropertyFails() { + numSnapshotSetPropertyFails.incr(); + } + public void setNumSnapshotActive(long num) { long currVal = numSnapshotActive.value(); numSnapshotActive.incr(num - 
currVal); @@ -530,6 +553,17 @@ public void decNumSnapshotDeleted() { numSnapshotDeleted.incr(-1); } + public int getNumSnapshotCacheSize() { + return numSnapshotCacheSize.value(); + } + public void incNumSnapshotCacheSize() { + numSnapshotCacheSize.incr(); + } + + public void decNumSnapshotCacheSize() { + numSnapshotCacheSize.decr(); + } + public void incNumCompleteMultipartUploadFails() { numCompleteMultipartUploadFails.incr(); } @@ -1305,6 +1339,14 @@ public long getNumSnapshotDiffJobs() { return numSnapshotDiffJobs.value(); } + public long getNumSnapshotPurges() { + return numSnapshotPurges.value(); + } + + public long getNumSnapshotSetProperties() { + return numSnapshotSetProperties.value(); + } + public long getNumSnapshotCreateFails() { return numSnapshotCreateFails.value(); } @@ -1329,6 +1371,13 @@ public long getNumSnapshotDeleted() { return numSnapshotDeleted.value(); } + public long getNumSnapshotPurgeFails() { + return numSnapshotPurgeFails.value(); + } + + public long getNumSnapshotSetPropertyFails() { + return numSnapshotSetPropertyFails.value(); + } public void incNumTrashRenames() { numTrashRenames.incr(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index d118e2f4ecc9..f2f11025158d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -123,20 +123,20 @@ public void addLookupLatency(long latencyInNs) { lookupLatencyNs.add(latencyInNs); } - public MutableRate getLookupRefreshLocationLatencyNs() { + MutableRate getLookupRefreshLocationLatencyNs() { return lookupRefreshLocationLatencyNs; } - public MutableRate getLookupGenerateBlockTokenLatencyNs() { + MutableRate getLookupGenerateBlockTokenLatencyNs() { return lookupGenerateBlockTokenLatencyNs; } - public MutableRate getLookupReadKeyInfoLatencyNs() { + MutableRate getLookupReadKeyInfoLatencyNs() { return lookupReadKeyInfoLatencyNs; } - public MutableRate getLookupAclCheckLatencyNs() { + MutableRate getLookupAclCheckLatencyNs() { return lookupAclCheckLatencyNs; } @@ -144,7 +144,7 @@ public void addS3VolumeContextLatencyNs(long latencyInNs) { s3VolumeContextLatencyNs.add(latencyInNs); } - public MutableRate getLookupResolveBucketLatencyNs() { + MutableRate getLookupResolveBucketLatencyNs() { return lookupResolveBucketLatencyNs; } @@ -152,27 +152,27 @@ public void addGetKeyInfoLatencyNs(long value) { getKeyInfoLatencyNs.add(value); } - public MutableRate getGetKeyInfoAclCheckLatencyNs() { + MutableRate getGetKeyInfoAclCheckLatencyNs() { return getKeyInfoAclCheckLatencyNs; } - public MutableRate getGetKeyInfoGenerateBlockTokenLatencyNs() { + MutableRate getGetKeyInfoGenerateBlockTokenLatencyNs() { return getKeyInfoGenerateBlockTokenLatencyNs; } - public MutableRate getGetKeyInfoReadKeyInfoLatencyNs() { + MutableRate getGetKeyInfoReadKeyInfoLatencyNs() { return getKeyInfoReadKeyInfoLatencyNs; } - public MutableRate getGetKeyInfoRefreshLocationLatencyNs() { + MutableRate getGetKeyInfoRefreshLocationLatencyNs() { return getKeyInfoRefreshLocationLatencyNs; } - public MutableRate getGetKeyInfoResolveBucketLatencyNs() { + MutableRate getGetKeyInfoResolveBucketLatencyNs() { return getKeyInfoResolveBucketLatencyNs; } - public MutableRate getGetKeyInfoSortDatanodesLatencyNs() { + MutableRate getGetKeyInfoSortDatanodesLatencyNs() { 
return getKeyInfoSortDatanodesLatencyNs; } @@ -216,11 +216,11 @@ public MutableRate getValidateAndUpdateCacheLatencyNs() { return validateAndUpdateCacheLatencyNs; } - public MutableRate getListKeysAclCheckLatencyNs() { + MutableRate getListKeysAclCheckLatencyNs() { return listKeysAclCheckLatencyNs; } - public MutableRate getListKeysResolveBucketLatencyNs() { + MutableRate getListKeysResolveBucketLatencyNs() { return listKeysResolveBucketLatencyNs; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java index 970cd8b95f16..66c525f0712a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.ozone.om.protocol.OMInterServiceProtocol; import org.apache.hadoop.ozone.om.protocol.OMAdminProtocol; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; @@ -31,6 +32,7 @@ import java.util.List; import java.util.function.Supplier; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL; import static org.apache.hadoop.ozone.om.OMConfigKeys .OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL; @@ -61,7 +63,9 @@ public static OMPolicyProvider getInstance() { new Service(OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL, OMInterServiceProtocol.class), new Service(OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL, - OMAdminProtocol.class) + OMAdminProtocol.class), + new Service(OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) ); @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 6bcefc47cb71..6b48cea0950a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -91,7 +91,6 @@ import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; @@ -111,7 +110,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import 
static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; @@ -908,40 +906,6 @@ public long getOmEpoch() { return omEpoch; } - /** - * Returns true if the firstArray startsWith the bytes of secondArray. - * - * @param firstArray - Byte array - * @param secondArray - Byte array - * @return true if the first array bytes match the bytes in the second array. - */ - private boolean startsWith(byte[] firstArray, byte[] secondArray) { - - if (firstArray == null) { - // if both are null, then the arrays match, else if first is null and - // second is not, then this function returns false. - return secondArray == null; - } - - - if (secondArray != null) { - // If the second array is longer then first array cannot be starting with - // the bytes of second array. - if (secondArray.length > firstArray.length) { - return false; - } - - for (int ndx = 0; ndx < secondArray.length; ndx++) { - if (firstArray[ndx] != secondArray[ndx]) { - return false; - } - } - return true; //match, return true. - } - return false; // if first is not null and second is null, we define that - // array does not start with same chars. - } - /** * Given a volume, check if it is empty, i.e there are no buckets inside it. * We iterate in the bucket table and see if there is any key that starts with @@ -1632,7 +1596,7 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, OmBucketInfo bucketInfo = getBucketTable().get(bucketKey); // Get the latest snapshot in snapshot path. - try (ReferenceCounted + try (ReferenceCounted rcLatestSnapshot = getLatestActiveSnapshot( keySplit[1], keySplit[2], omSnapshotManager)) { @@ -1650,13 +1614,12 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, if (rcLatestSnapshot != null) { Table prevKeyTable = - ((OmSnapshot) rcLatestSnapshot.get()) + rcLatestSnapshot.get() .getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); Table prevDeletedTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDeletedTable(); + rcLatestSnapshot.get().getMetadataManager().getDeletedTable(); String prevKeyTableDBKey = getSnapshotRenamedTable() .get(dbRenameKey); String prevDelTableDBKey = getOzoneKey(info.getVolumeName(), @@ -1742,8 +1705,7 @@ private boolean versionExistsInPreviousSnapshot(OmKeyInfo omKeyInfo, /** * Get the latest OmSnapshot for a snapshot path. */ - public ReferenceCounted< - IOmMetadataReader, SnapshotCache> getLatestActiveSnapshot( + public ReferenceCounted getLatestActiveSnapshot( String volumeName, String bucketName, OmSnapshotManager snapshotManager) throws IOException { @@ -1777,13 +1739,12 @@ IOmMetadataReader, SnapshotCache> getLatestActiveSnapshot( } } - Optional> rcOmSnapshot = + Optional> rcOmSnapshot = snapshotInfo.isPresent() ? 
Optional.ofNullable( - snapshotManager.checkForSnapshot(volumeName, + snapshotManager.getSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfo.get().getName()), - true) + snapshotInfo.get().getName()) ) : Optional.empty(); @@ -1877,18 +1838,24 @@ public ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, .filter(id -> id.equals(clientIdString)) .isPresent(); - if (!isHsync && openKeyInfo.getCreationTime() <= expiredCreationTimestamp) { + if ((!isHsync && openKeyInfo.getCreationTime() <= expiredCreationTimestamp) || + (openKeyInfo.getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY)) || + (openKeyInfo.getMetadata().containsKey(OzoneConsts.OVERWRITTEN_HSYNC_KEY))) { // add non-hsync'ed keys + // also add hsync keys which are already deleted/overwritten from keyTable expiredKeys.addOpenKey(openKeyInfo, dbOpenKeyName); num++; } else if (isHsync && openKeyInfo.getModificationTime() <= expiredLeaseTimestamp && !openKeyInfo.getMetadata().containsKey(OzoneConsts.LEASE_RECOVERY)) { // add hsync'ed keys final OmKeyInfo info = kt.get(dbKeyName); + // Set keyName from openFileTable which contains keyName with full path like(/a/b/c/d/e/file1), + // which is required in commit key request. + // Whereas fileTable contains only leaf in keyName(like file1) and so cannot be used in commit request. final KeyArgs.Builder keyArgs = KeyArgs.newBuilder() .setVolumeName(info.getVolumeName()) .setBucketName(info.getBucketName()) - .setKeyName(info.getKeyName()) + .setKeyName(openKeyInfo.getKeyName()) .setDataSize(info.getDataSize()); java.util.Optional.ofNullable(info.getLatestVersionLocations()) .map(OmKeyLocationInfoGroup::getLocationList) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java index 7c332788d28a..84a5148720b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java @@ -387,7 +387,7 @@ public List getAcl(OzoneObj obj) throws IOException { String volumeName = obj.getVolumeName(); String bucketName = obj.getBucketName(); String keyName = obj.getKeyName(); - if (obj.getResourceType() == ResourceType.KEY) { + if (obj.getResourceType() == ResourceType.KEY || obj.getResourceType() == ResourceType.PREFIX) { ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink( Pair.of(volumeName, bucketName)); volumeName = resolvedBucket.realVolume(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java index 5839c61cf31f..f863c086028e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java @@ -47,6 +47,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.UUID; import java.util.stream.Collectors; /** @@ -74,6 +75,7 @@ public class OmSnapshot implements IOmMetadataReader, Closeable { private final String volumeName; private final String bucketName; private final String snapshotName; + private final UUID snapshotID; // To access snapshot checkpoint DB metadata private final OMMetadataManager omMetadataManager; private final KeyManager keyManager; @@ -83,7 +85,8 @@ public OmSnapshot(KeyManager keyManager, 
OzoneManager ozoneManager, String volumeName, String bucketName, - String snapshotName) { + String snapshotName, + UUID snapshotID) { IAccessAuthorizer accessAuthorizer = OzoneAuthorizerFactory.forSnapshot(ozoneManager, keyManager, prefixManager); @@ -93,6 +96,7 @@ public OmSnapshot(KeyManager keyManager, this.snapshotName = snapshotName; this.bucketName = bucketName; this.volumeName = volumeName; + this.snapshotID = snapshotID; this.keyManager = keyManager; this.omMetadataManager = keyManager.getMetadataManager(); } @@ -295,6 +299,10 @@ public String getName() { return snapshotName; } + public UUID getSnapshotID() { + return snapshotID; + } + @Override public void close() throws IOException { // Close DB diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 2dab56ede67b..11edd28e9442 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -35,6 +35,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.UUID; import com.google.common.cache.RemovalListener; import org.apache.hadoop.hdds.StringUtils; @@ -82,6 +83,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIFF_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES; @@ -244,7 +247,7 @@ public OmSnapshotManager(OzoneManager ozoneManager) { OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE, OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT); - CacheLoader loader = createCacheLoader(); + CacheLoader loader = createCacheLoader(); // TODO: [SNAPSHOT] Remove this if not going to make SnapshotCache impl // pluggable. 
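Editor's note: with this change the snapshot cache loader is keyed by snapshot UUID instead of the snapshotInfoTable key string, and an id that is missing from the snapshot chain surfaces as an IOException (purged snapshot or corrupted chain). A minimal sketch of that loader contract using Guava's LoadingCache follows; the value type and chain lookup are stand-ins, not the actual SnapshotCache implementation.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.io.IOException;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;

public final class UuidKeyedCacheSketch {

  /** Stand-in for the snapshot chain's snapshotId -> table-key mapping. */
  private static final Map<UUID, String> CHAIN = new ConcurrentHashMap<>();

  private static final LoadingCache<UUID, String> CACHE = CacheBuilder.newBuilder()
      .maximumSize(512)
      .build(new CacheLoader<UUID, String>() {
        @Override
        public String load(UUID snapshotId) throws IOException {
          String tableKey = CHAIN.get(snapshotId);
          if (tableKey == null) {
            // Mirrors the new loader: an unknown id means the snapshot was
            // purged in between or the chain is missing entries.
            throw new IOException("No snapshot exists with snapshotId: " + snapshotId);
          }
          return tableKey; // the real loader opens the snapshot DB here
        }
      });

  public static String get(UUID id) throws IOException {
    try {
      return CACHE.get(id);
    } catch (ExecutionException e) {
      throw new IOException(e.getCause());
    }
  }

  private UuidKeyedCacheSketch() { }
}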
@@ -271,10 +274,15 @@ public OmSnapshotManager(OzoneManager ozoneManager) { }; // Init snapshot cache - this.snapshotCache = new SnapshotCache(this, loader, softCacheSize); + long cacheCleanupServiceInterval = ozoneManager.getConfiguration() + .getTimeDuration(OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL, + OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + this.snapshotCache = new SnapshotCache(loader, softCacheSize, ozoneManager.getMetrics(), + cacheCleanupServiceInterval); this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, differ, - ozoneManager, snapshotCache, snapDiffJobCf, snapDiffReportCf, + ozoneManager, snapDiffJobCf, snapDiffReportCf, columnFamilyOptions, codecRegistry); diffCleanupServiceInterval = ozoneManager.getConfiguration() @@ -325,19 +333,25 @@ public boolean canDisableFsSnapshot(OMMetadataManager ommm) { return isSnapshotInfoTableEmpty; } - private CacheLoader createCacheLoader() { - return new CacheLoader() { + private CacheLoader createCacheLoader() { + return new CacheLoader() { @Nonnull @Override - public OmSnapshot load(@Nonnull String snapshotTableKey) - throws IOException { - // Check if the snapshot exists - final SnapshotInfo snapshotInfo = getSnapshotInfo(snapshotTableKey); + public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { + String snapshotTableKey = ((OmMetadataManagerImpl) ozoneManager.getMetadataManager()) + .getSnapshotChainManager() + .getTableKey(snapshotId); + + // SnapshotChain maintains in-memory reverse mapping of snapshotId to snapshotName based on snapshotInfoTable. + // So it should not happen ideally. + // If it happens, then either snapshot has been purged in between or SnapshotChain is corrupted + // and missing some entries which needs investigation. + if (snapshotTableKey == null) { + throw new IOException("No snapshot exist with snapshotId: " + snapshotId); + } - // Block snapshot from loading when it is no longer active e.g. DELETED, - // unless this is called from SnapshotDeletingService. - checkSnapshotActive(snapshotInfo, true); + final SnapshotInfo snapshotInfo = getSnapshotInfo(snapshotTableKey); CacheValue cacheValue = ozoneManager.getMetadataManager() .getSnapshotInfoTable() @@ -365,7 +379,7 @@ public OmSnapshot load(@Nonnull String snapshotTableKey) try { // create the other manager instances based on snapshot // metadataManager - PrefixManagerImpl pm = new PrefixManagerImpl(snapshotMetadataManager, + PrefixManagerImpl pm = new PrefixManagerImpl(ozoneManager, snapshotMetadataManager, false); KeyManagerImpl km = new KeyManagerImpl(ozoneManager, ozoneManager.getScmClient(), snapshotMetadataManager, conf, @@ -375,7 +389,8 @@ public OmSnapshot load(@Nonnull String snapshotTableKey) return new OmSnapshot(km, pm, ozoneManager, snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), - snapshotInfo.getName()); + snapshotInfo.getName(), + snapshotInfo.getSnapshotId()); } catch (Exception e) { // Close RocksDB if there is any failure. if (!snapshotMetadataManager.getStore().isClosed()) { @@ -397,11 +412,32 @@ private static CodecRegistry createCodecRegistryForSnapDiff() { } /** - * Get snapshot instance LRU cache. - * @return LoadingCache + * Get snapshot instance LRU cache size. + * @return cache size. */ - public SnapshotCache getSnapshotCache() { - return snapshotCache; + @VisibleForTesting + public int getSnapshotCacheSize() { + return snapshotCache == null ? 
0 : snapshotCache.size(); + } + + /** + * Immediately invalidate all entries and close their DB instances in cache. + */ + public void invalidateCache() { + if (snapshotCache != null) { + snapshotCache.invalidateAll(); + } + } + + /** + * Immediately invalidate an entry. + * + * @param key SnapshotId. + */ + public void invalidateCacheEntry(UUID key) { + if (snapshotCache != null) { + snapshotCache.invalidate(key); + } } /** @@ -590,11 +626,11 @@ private static void deleteKeysFromDelKeyTableInSnapshotScope( } // Get OmSnapshot if the keyName has ".snapshot" key indicator - public ReferenceCounted checkForSnapshot( + @SuppressWarnings("unchecked") + public ReferenceCounted getActiveFsMetadataOrSnapshot( String volumeName, String bucketName, - String keyName, - boolean skipActiveCheck) throws IOException { + String keyName) throws IOException { if (keyName == null || !ozoneManager.isFilesystemSnapshotEnabled()) { return ozoneManager.getOmMetadataReader(); } @@ -603,31 +639,57 @@ public ReferenceCounted checkForSnapshot( String[] keyParts = keyName.split(OM_KEY_PREFIX); if (isSnapshotKey(keyParts)) { String snapshotName = keyParts[1]; - if (snapshotName == null || snapshotName.isEmpty()) { - // don't allow snapshot indicator without snapshot name - throw new OMException(INVALID_KEY_NAME); - } - String snapshotTableKey = SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName); - - // Block FS API reads when snapshot is not active. - if (!skipActiveCheck) { - checkSnapshotActive(ozoneManager, snapshotTableKey); - } - // Warn if actual cache size exceeds the soft limit already. - if (snapshotCache.size() > softCacheSize) { - LOG.warn("Snapshot cache size ({}) exceeds configured soft-limit ({}).", - snapshotCache.size(), softCacheSize); - } - - // retrieve the snapshot from the cache - return snapshotCache.get(snapshotTableKey, skipActiveCheck); + return (ReferenceCounted) (ReferenceCounted) + getActiveSnapshot(volumeName, bucketName, snapshotName); } else { return ozoneManager.getOmMetadataReader(); } } + public ReferenceCounted getActiveSnapshot( + String volumeName, + String bucketName, + String snapshotName) throws IOException { + return getSnapshot(volumeName, bucketName, snapshotName, false); + } + + public ReferenceCounted getSnapshot( + String volumeName, + String bucketName, + String snapshotName) throws IOException { + return getSnapshot(volumeName, bucketName, snapshotName, true); + } + + private ReferenceCounted getSnapshot( + String volumeName, + String bucketName, + String snapshotName, + boolean skipActiveCheck) throws IOException { + + if (snapshotName == null || snapshotName.isEmpty()) { + // don't allow snapshot indicator without snapshot name + throw new OMException(INVALID_KEY_NAME); + } + + String snapshotTableKey = SnapshotInfo.getTableKey(volumeName, + bucketName, snapshotName); + + return getSnapshot(snapshotTableKey, skipActiveCheck); + } + + private ReferenceCounted getSnapshot(String snapshotTableKey, boolean skipActiveCheck) + throws IOException { + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, snapshotTableKey); + // Block FS API reads when snapshot is not active. + if (!skipActiveCheck) { + checkSnapshotActive(snapshotInfo, false); + } + + // retrieve the snapshot from the cache + return snapshotCache.get(snapshotInfo.getSnapshotId()); + } + /** * Returns true if the snapshot is in given status. 
* @param key DB snapshot table key @@ -894,9 +956,11 @@ public void close() { if (snapshotDiffManager != null) { snapshotDiffManager.close(); } + if (snapshotCache != null) { - snapshotCache.invalidateAll(); + snapshotCache.close(); } + if (snapshotDiffCleanupService != null) { snapshotDiffCleanupService.shutdown(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java index cb42a0a881c4..ed2527e03020 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java @@ -83,8 +83,6 @@ public static void checkAllAcls(OmMetadataReader omMetadataReader, String bucketOwner, UserGroupInformation user, InetAddress remoteAddress, String hostName) throws IOException { - boolean isVolOwner = isOwner(user, volOwner); - switch (resType) { //For Volume level access we only need to check {OWNER} equal // to Volume Owner. @@ -100,7 +98,7 @@ public static void checkAllAcls(OmMetadataReader omMetadataReader, // volume owner if current ugi user is volume owner else we need check //{OWNER} equals bucket owner for bucket/key/prefix. case PREFIX: - if (isVolOwner) { + if (isOwner(user, volOwner)) { omMetadataReader.checkAcls(resType, storeType, aclType, vol, bucket, key, user, remoteAddress, hostName, true, @@ -184,12 +182,6 @@ public static IAccessAuthorizer.ACLType getParentNativeAcl( private static boolean isOwner(UserGroupInformation callerUgi, String ownerName) { - if (ownerName == null) { - return false; - } - if (callerUgi.getShortUserName().equals(ownerName)) { - return true; - } - return false; + return ownerName != null && ownerName.equals(callerUgi.getShortUserName()); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 4b654e3d195d..912ed9fd1c74 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -76,19 +76,22 @@ import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService; -import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; +import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolOmPB; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneManagerVersion; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; import 
org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.lock.OMLockDetails; @@ -102,7 +105,6 @@ import org.apache.hadoop.ozone.om.service.OMRangerBGSyncService; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.security.acl.OzoneAuthorizerFactory; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; @@ -360,6 +362,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private OzoneBlockTokenSecretManager blockTokenMgr; private CertificateClient certClient; private SecretKeySignerClient secretKeyClient; + private ScmTopologyClient scmTopologyClient; private final Text omRpcAddressTxt; private OzoneConfiguration configuration; private RPC.Server omRpcServer; @@ -458,6 +461,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private final OzoneLockProvider ozoneLockProvider; private final OMPerformanceMetrics perfMetrics; + private final BucketUtilizationMetrics bucketUtilizationMetrics; private boolean fsSnapshotEnabled; @@ -490,7 +494,7 @@ private enum State { private OmMetadataReader omMetadataReader; // Wrap active DB metadata reader in ReferenceCounted once to avoid // instance creation every single time. - private ReferenceCounted rcOmMetadataReader; + private ReferenceCounted rcOmMetadataReader; private OmSnapshotManager omSnapshotManager; @SuppressWarnings("methodlength") @@ -609,6 +613,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) final StorageContainerLocationProtocol scmContainerClient = getScmContainerClient(configuration); // verifies that the SCM info in the OM Version file is correct. final ScmBlockLocationProtocol scmBlockClient = getScmBlockClient(configuration); + scmTopologyClient = new ScmTopologyClient(scmBlockClient); this.scmClient = new ScmClient(scmBlockClient, scmContainerClient, configuration); this.ozoneLockProvider = new OzoneLockProvider(getKeyPathLockEnabled(), @@ -720,6 +725,8 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) } else { omState = State.INITIALIZED; } + + bucketUtilizationMetrics = BucketUtilizationMetrics.create(metadataManager); } public boolean isStopped() { @@ -845,7 +852,7 @@ private void instantiateServices(boolean withNewSnapshot) throws IOException { delegationTokenMgr = createDelegationTokenSecretManager(configuration); } - prefixManager = new PrefixManagerImpl(metadataManager, isRatisEnabled); + prefixManager = new PrefixManagerImpl(this, metadataManager, isRatisEnabled); keyManager = new KeyManagerImpl(this, scmClient, configuration, perfMetrics); accessAuthorizer = OzoneAuthorizerFactory.forOM(this); @@ -1141,6 +1148,20 @@ public void setCertClient(CertificateClient newClient) throws IOException { serviceInfo = new ServiceInfoProvider(secConfig, this, certClient); } + /** + * For testing purpose only. This allows setting up ScmBlockLocationClient + * without having to fully setup a working cluster. + */ + @VisibleForTesting + public void setScmTopologyClient( + ScmTopologyClient scmTopologyClient) { + this.scmTopologyClient = scmTopologyClient; + } + + public NetworkTopology getClusterMap() { + return scmTopologyClient.getClusterMap(); + } + /** * For testing purpose only. This allows testing token in integration test * without fully setting up a working secure cluster. 
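Editor's note: KeyManagerImpl now resolves the client's network location locally (via the configured DNSToSwitchMapping, TableMapping by default) and sorts datanodes through OzoneManager#getClusterMap() instead of asking SCM. A minimal sketch of the resolution half, following the same wrap-in-a-cache pattern as the patch; the topology file path is illustrative.

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.CachedDNSToSwitchMapping;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.TableMapping;
import org.apache.hadoop.util.ReflectionUtils;

public final class ClientRackResolverSketch {
  private ClientRackResolverSketch() { }

  public static String resolveLocation(Configuration conf, String clientMachine) {
    // Same pattern as KeyManagerImpl#start: instantiate the configured mapping
    // and wrap it in a caching layer unless it already caches.
    DNSToSwitchMapping raw = ReflectionUtils.newInstance(TableMapping.class, conf);
    DNSToSwitchMapping mapping = raw instanceof CachedDNSToSwitchMapping
        ? raw : new CachedDNSToSwitchMapping(raw);

    List<String> resolved = mapping.resolve(Collections.singletonList(clientMachine));
    return (resolved == null || resolved.isEmpty()) ? null : resolved.get(0);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // TableMapping reads "host rack" pairs from this file (illustrative path).
    conf.set("net.topology.table.file.name", "/etc/hadoop/topology.table");
    System.out.println(resolveLocation(conf, "client-host-1"));
  }
}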
@@ -1289,7 +1310,7 @@ private RPC.Server startRpcServer(OzoneConfiguration conf, interOMProtocolService, rpcServer); HddsServerUtil.addPBProtocol(conf, OMAdminProtocolPB.class, omAdminProtocolService, rpcServer); - HddsServerUtil.addPBProtocol(conf, ReconfigureProtocolPB.class, + HddsServerUtil.addPBProtocol(conf, ReconfigureProtocolOmPB.class, reconfigureProtocolService, rpcServer); if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, @@ -1636,13 +1657,12 @@ public void start() throws IOException { metadataManager.start(configuration); + startSecretManagerIfNecessary(); // Start Ratis services if (omRatisServer != null) { omRatisServer.start(); } - startSecretManagerIfNecessary(); - upgradeFinalizer.runPrefinalizeStateActions(omStorage, this); Integer layoutVersionInDB = getLayoutVersionInDB(); if (layoutVersionInDB == null || @@ -1677,6 +1697,13 @@ public void start() throws IOException { metricsTimer = new Timer(); metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period); + try { + scmTopologyClient.start(configuration); + } catch (IOException ex) { + LOG.error("Unable to initialize network topology schema file. ", ex); + throw new UncheckedIOException(ex); + } + keyManager.start(configuration); try { @@ -2159,15 +2186,16 @@ public long getObjectIdFromTxId(long trxnId) { long getLastTrxnIndexForNonRatis() throws IOException { TransactionInfo transactionInfo = TransactionInfo.readTransactionInfo(metadataManager); - // If the OMTransactionInfo does not exist in DB or if the term is not -1 - // (corresponding to non-Ratis cluster), return 0 so that new incoming + // If the OMTransactionInfo does not exist in DB, return 0 so that new incoming // requests can have transaction index starting from 1. - if (transactionInfo == null || transactionInfo.getTerm() != -1) { + if (transactionInfo == null) { return 0; } - // If there exists a last transaction index in DB, the new incoming - // requests in non-Ratis cluster must have transaction index - // incrementally increasing from the stored transaction index onwards. + // If there exists a last transaction index in DB, including two cases: + // 1. transactionInfo.getTerm() == -1 corresponds to a non-Ratis cluster + // 2. transactionInfo.getTerm() != -1 indicates that the DB may be migrated from Ratis cluster + // For both cases above, the new incoming requests in non-Ratis cluster must have + // transaction index incrementally increasing from the stored transaction index onwards. 
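Editor's note: the relaxed check in getLastTrxnIndexForNonRatis means a DB migrated from a Ratis cluster (term != -1) also resumes from the stored index; only a missing TransactionInfo falls back to 0. A tiny sketch of that decision with a hypothetical value, not the real TransactionInfo API:

/** Hypothetical stand-in for the decision in getLastTrxnIndexForNonRatis. */
final class LastIndexSketch {
  private LastIndexSketch() { }

  /**
   * @param storedIndex persisted transaction index, or null if no
   *        TransactionInfo exists in the DB yet
   * @param term persisted term; -1 for a cluster that never ran Ratis
   *        (deliberately not consulted any more)
   */
  static long lastIndexForNonRatis(Long storedIndex, long term) {
    if (storedIndex == null) {
      return 0; // fresh DB: new requests start from index 1
    }
    // Resume from the stored index in both cases: term == -1 (always
    // non-Ratis) and term != -1 (DB migrated from a Ratis cluster).
    return storedIndex;
  }
}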
return transactionInfo.getTransactionIndex(); } @@ -2237,6 +2265,11 @@ public boolean stop() { } keyManager.stop(); stopSecretManager(); + + if (scmTopologyClient != null) { + scmTopologyClient.stop(); + } + if (httpServer != null) { httpServer.stop(); } @@ -2266,6 +2299,10 @@ public boolean stop() { OMHAMetrics.unRegister(); } omRatisServer = null; + + if (bucketUtilizationMetrics != null) { + bucketUtilizationMetrics.unRegister(); + } return true; } catch (Exception e) { LOG.error("OzoneManager stop failed.", e); @@ -2585,8 +2622,7 @@ public boolean getAllowListAllVolumes() { return allowListAllVolumes; } - public ReferenceCounted< - IOmMetadataReader, SnapshotCache> getOmMetadataReader() { + public ReferenceCounted getOmMetadataReader() { return rcOmMetadataReader; } @@ -2856,8 +2892,7 @@ public OmBucketInfo getBucketInfo(String volume, String bucket) */ @Override public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { - try (ReferenceCounted - rcReader = getReader(args)) { + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().lookupKey(args); } } @@ -2869,8 +2904,7 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { public KeyInfoWithVolumeContext getKeyInfo(final OmKeyArgs args, boolean assumeS3Context) throws IOException { - try (ReferenceCounted rcReader = - getReader(args)) { + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().getKeyInfo(args, assumeS3Context); } } @@ -2882,7 +2916,7 @@ public KeyInfoWithVolumeContext getKeyInfo(final OmKeyArgs args, public ListKeysResult listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(volumeName, bucketName, keyPrefix)) { return rcReader.get().listKeys( volumeName, bucketName, startKey, keyPrefix, maxKeys); @@ -3720,7 +3754,7 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, */ @Override public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().getFileStatus(args); } @@ -3731,7 +3765,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { */ @Override public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().lookupFile(args); } @@ -3750,7 +3784,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().listStatus( args, recursive, startKey, numEntries, allowPartialPrefixes); @@ -3774,7 +3808,7 @@ public List listStatusLight(OmKeyArgs args, */ @Override public List getAcl(OzoneObj obj) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(obj)) { return rcReader.get().getAcl(obj); } @@ -3842,7 +3876,7 @@ TermIndex installCheckpoint(String leaderId, Path checkpointLocation, keyManager.stop(); stopSecretManager(); stopTrashEmptier(); - omSnapshotManager.getSnapshotCache().invalidateAll(); + omSnapshotManager.invalidateCache(); // Pause the State Machine so that no new transactions can be applied. 
// This action also clears the OM Double Buffer so that if there are any // pending transactions in the buffer, they are discarded. @@ -4089,7 +4123,7 @@ private void reloadOMState() throws IOException { startSecretManagerIfNecessary(); startTrashEmptier(configuration); - // Set metrics and start metrics back ground thread + // Set metrics and start metrics background thread metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager .getVolumeTable())); metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager @@ -4103,7 +4137,7 @@ private void reloadOMState() throws IOException { metrics.setNumFiles(metadataManager .countEstimatedRowsInTable(metadataManager.getFileTable())); - // Delete the omMetrics file if it exists and save the a new metrics file + // Delete the omMetrics file if it exists and save a new metrics file // with new data Files.deleteIfExists(getMetricsStorageFile().toPath()); saveOmMetrics(); @@ -4546,13 +4580,13 @@ private OmVolumeArgs createS3VolumeContext(String s3Volume, List listOfAcls = new ArrayList<>(); //User ACL listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - userName, ACLType.ALL, ACCESS)); + userName, ACCESS, ACLType.ALL)); //Group ACLs of the User List userGroups = Arrays.asList(UserGroupInformation .createRemoteUser(userName).getGroupNames()); userGroups.forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, ACLType.ALL, ACCESS))); + new OzoneAcl(ACLIdentityType.GROUP, group, ACCESS, ACLType.ALL))); // Add ACLs for (OzoneAcl ozoneAcl : listOfAcls) { @@ -4683,7 +4717,7 @@ public EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeResp, } @Override - public OmKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) { + public LeaseKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) { return null; } @@ -4795,12 +4829,10 @@ public static HddsProtos.OzoneManagerDetailsProto getOmDetailsProto( * @param keyArgs OmKeyArgs * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader(OmKeyArgs keyArgs) + private ReferenceCounted getReader(OmKeyArgs keyArgs) throws IOException { - return omSnapshotManager.checkForSnapshot( - keyArgs.getVolumeName(), keyArgs.getBucketName(), keyArgs.getKeyName(), - false); + return omSnapshotManager.getActiveFsMetadataOrSnapshot( + keyArgs.getVolumeName(), keyArgs.getBucketName(), keyArgs.getKeyName()); } /** @@ -4812,11 +4844,10 @@ IOmMetadataReader, SnapshotCache> getReader(OmKeyArgs keyArgs) * @param key key path * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader( + private ReferenceCounted getReader( String volumeName, String bucketName, String key) throws IOException { - return omSnapshotManager.checkForSnapshot( - volumeName, bucketName, key, false); + return omSnapshotManager.getActiveFsMetadataOrSnapshot( + volumeName, bucketName, key); } /** @@ -4826,14 +4857,12 @@ IOmMetadataReader, SnapshotCache> getReader( * @param ozoneObj OzoneObj * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader(OzoneObj ozoneObj) + private ReferenceCounted getReader(OzoneObj ozoneObj) throws IOException { - return omSnapshotManager.checkForSnapshot( + return omSnapshotManager.getActiveFsMetadataOrSnapshot( ozoneObj.getVolumeName(), ozoneObj.getBucketName(), - ozoneObj.getKeyName(), - false); + ozoneObj.getKeyName()); } @SuppressWarnings("parameternumber") @@ -4846,9 +4875,13 
@@ public SnapshotDiffResponse snapshotDiff(String volume, boolean forceFullDiff, boolean disableNativeDiff) throws IOException { - return omSnapshotManager.getSnapshotDiffReport(volume, bucket, - fromSnapshot, toSnapshot, token, pageSize, forceFullDiff, - disableNativeDiff); + + if (isAclEnabled) { + omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume, bucket, null); + } + + return omSnapshotManager.getSnapshotDiffReport(volume, bucket, fromSnapshot, toSnapshot, + token, pageSize, forceFullDiff, disableNativeDiff); } public CancelSnapshotDiffResponse cancelSnapshotDiff(String volume, @@ -4856,8 +4889,12 @@ public CancelSnapshotDiffResponse cancelSnapshotDiff(String volume, String fromSnapshot, String toSnapshot) throws IOException { - return omSnapshotManager.cancelSnapshotDiff(volume, bucket, - fromSnapshot, toSnapshot); + + if (isAclEnabled) { + omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume, bucket, null); + } + + return omSnapshotManager.cancelSnapshotDiff(volume, bucket, fromSnapshot, toSnapshot); } public List listSnapshotDiffJobs(String volume, @@ -4865,8 +4902,12 @@ public List listSnapshotDiffJobs(String volume, String jobStatus, boolean listAll) throws IOException { - return omSnapshotManager.getSnapshotDiffList(volume, - bucket, jobStatus, listAll); + + if (isAclEnabled) { + omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST, volume, bucket, null); + } + + return omSnapshotManager.getSnapshotDiffList(volume, bucket, jobStatus, listAll); } public String printCompactionLogDag(String fileNamePrefix, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index d801d1dbf331..e8e930891df6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -18,6 +18,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -39,9 +41,12 @@ import java.util.Objects; import java.util.stream.Collectors; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.PREFIX_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; @@ -53,6 +58,7 @@ public class PrefixManagerImpl implements PrefixManager { LoggerFactory.getLogger(PrefixManagerImpl.class); private static final List EMPTY_ACL_LIST = new ArrayList<>(); + private final OzoneManager ozoneManager; private final OMMetadataManager metadataManager; // In-memory prefix tree to optimize ACL evaluation @@ -62,9 +68,10 @@ 
public class PrefixManagerImpl implements PrefixManager { // where we integrate both HA and Non-HA code. private boolean isRatisEnabled; - public PrefixManagerImpl(OMMetadataManager metadataManager, + public PrefixManagerImpl(OzoneManager ozoneManager, OMMetadataManager metadataManager, boolean isRatisEnabled) { this.isRatisEnabled = isRatisEnabled; + this.ozoneManager = ozoneManager; this.metadataManager = metadataManager; loadPrefixTree(); } @@ -90,16 +97,11 @@ public OMMetadataManager getMetadataManager() { return metadataManager; } - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ @Override public List getAcl(OzoneObj obj) throws IOException { validateOzoneObj(obj); - String prefixPath = obj.getPath(); + OzoneObj resolvedObj = getResolvedPrefixObj(obj); + String prefixPath = resolvedObj.getPath(); metadataManager.getLock().acquireReadLock(PREFIX_LOCK, prefixPath); try { String longestPrefix = prefixTree.getLongestPrefix(prefixPath); @@ -149,7 +151,14 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) Objects.requireNonNull(ozObject); Objects.requireNonNull(context); - String prefixPath = ozObject.getPath(); + OzoneObj resolvedObj; + try { + resolvedObj = getResolvedPrefixObj(ozObject); + } catch (IOException e) { + throw new OMException("Failed to resolveBucketLink:", e, INTERNAL_ERROR); + } + + String prefixPath = resolvedObj.getPath(); metadataManager.getLock().acquireReadLock(PREFIX_LOCK, prefixPath); try { String longestPrefix = prefixTree.getLongestPrefix(prefixPath); @@ -218,8 +227,8 @@ public void validateOzoneObj(OzoneObj obj) throws OMException { throw new OMException("Prefix name is required.", PREFIX_NOT_FOUND); } if (!prefixName.endsWith("/")) { - throw new OMException("Invalid prefix name: " + prefixName, - PREFIX_NOT_FOUND); + throw new OMException("Missing trailing slash '/' in prefix name: " + prefixName, + INVALID_PATH_IN_ACL_REQUEST); } } @@ -294,7 +303,7 @@ private void inheritParentAcl(OzoneObj ozoneObj, OmPrefixInfo prefixInfo) OmPrefixInfo parentPrefixInfo = prefixList.get(prefixList.size() - 1); if (parentPrefixInfo != null) { prefixParentFound = OzoneAclUtil.inheritDefaultAcls( - aclsToBeSet, parentPrefixInfo.getAcls()); + aclsToBeSet, parentPrefixInfo.getAcls(), ACCESS); } } @@ -305,13 +314,14 @@ private void inheritParentAcl(OzoneObj ozoneObj, OmPrefixInfo prefixInfo) OmBucketInfo bucketInfo = metadataManager.getBucketTable(). 
        get(bucketKey);
     if (bucketInfo != null) {
-      OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, bucketInfo.getAcls());
+      OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, bucketInfo.getAcls(), ACCESS);
     }
   }
 }
 
 public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List<OzoneAcl> ozoneAcls,
     OmPrefixInfo prefixInfo, long transactionLogIndex) throws IOException {
+  boolean newPrefix = false;
   if (prefixInfo == null) {
     OmPrefixInfo.Builder prefixInfoBuilder =
         new OmPrefixInfo.Builder()
@@ -322,10 +332,13 @@ public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List<OzoneAcl> ozoneAcls,
       prefixInfoBuilder.setUpdateID(transactionLogIndex);
     }
     prefixInfo = prefixInfoBuilder.build();
+    newPrefix = true;
   }
 
   boolean changed = prefixInfo.setAcls(ozoneAcls);
-  inheritParentAcl(ozoneObj, prefixInfo);
+  if (newPrefix) {
+    inheritParentAcl(ozoneObj, prefixInfo);
+  }
   prefixTree.insert(ozoneObj.getPath(), prefixInfo);
   if (!isRatisEnabled) {
     metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo);
@@ -333,12 +346,31 @@ public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List<OzoneAcl> ozoneAcls,
     return new OMPrefixAclOpResult(prefixInfo, changed);
   }
 
+  /**
+   * Get the resolved prefix object to handle a prefix that is under a link bucket.
+   * @param obj prefix object
+   * @return the resolved prefix object if the prefix is under a link bucket;
+   *         otherwise, the same prefix object.
+   * @throws IOException if resolving the bucket link fails.
+   */
+  public OzoneObj getResolvedPrefixObj(OzoneObj obj) throws IOException {
+    if (StringUtils.isEmpty(obj.getVolumeName()) || StringUtils.isEmpty(obj.getBucketName())) {
+      return obj;
+    }
+
+    ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink(
+        Pair.of(obj.getVolumeName(), obj.getBucketName()));
+    return resolvedBucket.update(obj);
+  }
+
   /**
    * Result of the prefix acl operation.
    */
   public static class OMPrefixAclOpResult {
-    private OmPrefixInfo omPrefixInfo;
-    private boolean operationsResult;
+    /** The updated prefix info after applying the prefix acl operation. */
+    private final OmPrefixInfo omPrefixInfo;
+    /** Operation result: true if the underlying ACLs were changed, false otherwise. */
+    private final boolean operationsResult;
 
     public OMPrefixAclOpResult(OmPrefixInfo omPrefixInfo,
         boolean operationsResult) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java
index 9c304ac2f1cc..af1db8bad368 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java
@@ -23,6 +23,8 @@
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 
 import java.util.LinkedHashMap;
 import java.util.Map;
@@ -120,6 +122,15 @@ public KeyArgs update(KeyArgs args) {
         : args;
   }
 
+  public OzoneObj update(OzoneObj ozoneObj) {
+    return isLink()
+        ?
OzoneObjInfo.Builder.fromOzoneObj(ozoneObj) + .setVolumeName(realVolume()) + .setBucketName(realBucket()) + .build() + : ozoneObj; + } + public boolean isLink() { return !Objects.equals(requestedVolume, realVolume) || !Objects.equals(requestedBucket, realBucket); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java index 18deca1a4ff0..60353590e75c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java @@ -348,6 +348,14 @@ public synchronized void addSnapshot(SnapshotInfo snapshotInfo) snapshotInfo.getTableKey()); } + /** + * Update snapshot chain when snapshot changes (e.g. renamed). + */ + public synchronized void updateSnapshot(SnapshotInfo snapshotInfo) { + snapshotIdToTableKey.computeIfPresent(snapshotInfo.getSnapshotId(), + (snapshotId, dbTableKey) -> snapshotInfo.getTableKey()); + } + /** * Delete snapshot from snapshot chain. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index cae9bc4b3fca..20d0ab0e53eb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -147,10 +146,9 @@ private void markSSTFilteredFlagForSnapshot(String volume, String bucket, @Override public BackgroundTaskResult call() throws Exception { - Optional snapshotCache = Optional.ofNullable(ozoneManager) - .map(OzoneManager::getOmSnapshotManager) - .map(OmSnapshotManager::getSnapshotCache); - if (!snapshotCache.isPresent()) { + Optional snapshotManager = Optional.ofNullable(ozoneManager) + .map(OzoneManager::getOmSnapshotManager); + if (!snapshotManager.isPresent()) { return BackgroundTaskResult.EmptyTaskResult.newResult(); } Table snapshotInfoTable = @@ -183,10 +181,12 @@ public BackgroundTaskResult call() throws Exception { snapshotInfo.getBucketName()); try ( - ReferenceCounted - snapshotMetadataReader = snapshotCache.get().get( - snapshotInfo.getTableKey())) { - OmSnapshot omSnapshot = (OmSnapshot) snapshotMetadataReader.get(); + ReferenceCounted snapshotMetadataReader = + snapshotManager.get().getActiveSnapshot( + snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), + snapshotInfo.getName())) { + OmSnapshot omSnapshot = snapshotMetadataReader.get(); RDBStore rdbStore = (RDBStore) omSnapshot.getMetadataManager() .getStore(); RocksDatabase db = rdbStore.getDb(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerRestMultiTenantAccessController.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerRestMultiTenantAccessController.java deleted file mode 100644 index de2987090906..000000000000 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerRestMultiTenantAccessController.java +++ /dev/null @@ -1,681 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.multitenant; - -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonDeserializationContext; -import com.google.gson.JsonDeserializer; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import com.google.gson.JsonParseException; -import com.google.gson.JsonParser; -import com.google.gson.JsonPrimitive; -import com.google.gson.JsonSerializationContext; -import com.google.gson.JsonSerializer; -import org.apache.commons.lang3.NotImplementedException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.http.auth.BasicUserPrincipal; -import org.apache.kerby.util.Base64; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManager; -import javax.net.ssl.X509TrustManager; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.lang.reflect.Type; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumMap; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_PASSWD; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_USER; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_HTTPS_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_CONNECTION_TIMEOUT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_CONNECTION_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_IGNORE_SERVER_CERT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_IGNORE_SERVER_CERT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_SERVICE; - -/** - * Access controller for multi-tenancy implemented using Ranger's REST API. 
- * This class is for testing and is not intended for production use. - * - * TODO: REMOVE. - */ -public class RangerRestMultiTenantAccessController - implements MultiTenantAccessController { - - public static final String OZONE_RANGER_POLICY_HTTP_ENDPOINT = - "/service/public/v2/api/policy/"; - - public static final String OZONE_RANGER_ROLE_HTTP_ENDPOINT = - "/service/public/v2/api/roles/"; - - private String getPolicyByNameEndpoint(String policyName) { - // /service/public/v2/api/service/{servicename}/policy/{policyname} - return rangerHttpsAddress + "/service/public/v2/api/service/" + - rangerService + "/policy/" + policyName; - } - - private String getRoleByNameEndpoint(String roleName) { - // /service/public/v2/api/roles/name/ - return rangerHttpsAddress + "/service/public/v2/api/roles/name/" + roleName; - } - - private static final Logger LOG = LoggerFactory - .getLogger(RangerRestMultiTenantAccessController.class); - - private final OzoneConfiguration conf; - private boolean ignoreServerCert = false; - private int connectionTimeout; - private int connectionRequestTimeout; - private String authHeaderValue; - private final String rangerHttpsAddress; - private final Gson jsonConverter; - private final String rangerService; - private final Map aclToString; - private final Map stringToAcl; - private long lastPolicyUpdateTimeEpochMillis = -1; - - public RangerRestMultiTenantAccessController(Configuration configuration) - throws IOException { - conf = new OzoneConfiguration(configuration); - rangerHttpsAddress = conf.get(OZONE_RANGER_HTTPS_ADDRESS_KEY); - rangerService = conf.get(OZONE_RANGER_SERVICE); - - GsonBuilder gsonBuilder = new GsonBuilder(); - gsonBuilder.registerTypeAdapter(Policy.class, policySerializer); - gsonBuilder.registerTypeAdapter(Policy.class, policyDeserializer); - gsonBuilder.registerTypeAdapter(Role.class, roleSerializer); - gsonBuilder.registerTypeAdapter(Role.class, roleDeserializer); - gsonBuilder.registerTypeAdapter(BasicUserPrincipal.class, userSerializer); - jsonConverter = gsonBuilder.create(); - - aclToString = new EnumMap<>(IAccessAuthorizer.ACLType.class); - stringToAcl = new HashMap<>(); - fillRangerAclStrings(); - initializeRangerConnection(); - } - - private void fillRangerAclStrings() { - aclToString.put(IAccessAuthorizer.ACLType.ALL, "all"); - aclToString.put(IAccessAuthorizer.ACLType.LIST, "list"); - aclToString.put(IAccessAuthorizer.ACLType.READ, "read"); - aclToString.put(IAccessAuthorizer.ACLType.WRITE, "write"); - aclToString.put(IAccessAuthorizer.ACLType.CREATE, "create"); - aclToString.put(IAccessAuthorizer.ACLType.DELETE, "delete"); - aclToString.put(IAccessAuthorizer.ACLType.READ_ACL, "read_acl"); - aclToString.put(IAccessAuthorizer.ACLType.WRITE_ACL, "write_acl"); - aclToString.put(IAccessAuthorizer.ACLType.NONE, ""); - - stringToAcl.put("all", IAccessAuthorizer.ACLType.ALL); - stringToAcl.put("list", IAccessAuthorizer.ACLType.LIST); - stringToAcl.put("read", IAccessAuthorizer.ACLType.READ); - stringToAcl.put("write", IAccessAuthorizer.ACLType.WRITE); - stringToAcl.put("create", IAccessAuthorizer.ACLType.CREATE); - stringToAcl.put("delete", IAccessAuthorizer.ACLType.DELETE); - stringToAcl.put("read_acl", IAccessAuthorizer.ACLType.READ_ACL); - stringToAcl.put("write_acl", IAccessAuthorizer.ACLType.WRITE_ACL); - stringToAcl.put("", IAccessAuthorizer.ACLType.NONE); - } - - private void initializeRangerConnection() { - setupRangerConnectionConfig(); - if (ignoreServerCert) { - setupRangerIgnoreServerCertificate(); - } - 
setupRangerConnectionAuthHeader(); - } - - private void setupRangerConnectionConfig() { - connectionTimeout = (int) conf.getTimeDuration( - OZONE_RANGER_OM_CONNECTION_TIMEOUT, - conf.get( - OZONE_RANGER_OM_CONNECTION_TIMEOUT, - OZONE_RANGER_OM_CONNECTION_TIMEOUT_DEFAULT), - TimeUnit.MILLISECONDS); - connectionRequestTimeout = (int)conf.getTimeDuration( - OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT, - conf.get( - OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT, - OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT), - TimeUnit.MILLISECONDS - ); - ignoreServerCert = conf.getBoolean( - OZONE_RANGER_OM_IGNORE_SERVER_CERT, - OZONE_RANGER_OM_IGNORE_SERVER_CERT_DEFAULT); - } - - private void setupRangerIgnoreServerCertificate() { - // Create a trust manager that does not validate certificate chains - TrustManager[] trustAllCerts = new TrustManager[]{ - new X509TrustManager() { - public java.security.cert.X509Certificate[] getAcceptedIssuers() { - return null; - } - public void checkClientTrusted( - java.security.cert.X509Certificate[] certs, String authType) { - } - public void checkServerTrusted( - java.security.cert.X509Certificate[] certs, String authType) { - } - } - }; - - try { - SSLContext sc = SSLContext.getInstance("SSL"); - sc.init(null, trustAllCerts, new java.security.SecureRandom()); - HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); - } catch (Exception e) { - LOG.info("Setting DefaultSSLSocketFactory failed."); - } - } - - private void setupRangerConnectionAuthHeader() { - String userName = conf.get(OZONE_OM_RANGER_HTTPS_ADMIN_API_USER); - String passwd = conf.get(OZONE_OM_RANGER_HTTPS_ADMIN_API_PASSWD); - String auth = userName + ":" + passwd; - byte[] encodedAuth = - Base64.encodeBase64(auth.getBytes(StandardCharsets.UTF_8)); - authHeaderValue = "Basic " + - new String(encodedAuth, StandardCharsets.UTF_8); - } - - - @Override - public Policy createPolicy(Policy policy) throws IOException { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_POLICY_HTTP_ENDPOINT; - HttpsURLConnection conn = makeHttpsPostCall(rangerAdminUrl, - jsonConverter.toJsonTree(policy).getAsJsonObject()); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to create policy %s. " + - "Http response code: %d", policy.getName(), conn.getResponseCode())); - } - getResponseData(conn); - - // TODO: Should reconstruct from response data. - return policy; - } - - @Override - public void deletePolicy(String policyName) throws IOException { - String rangerAdminUrl = getPolicyByNameEndpoint(policyName); - HttpsURLConnection conn = makeHttpsDeleteCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to delete policy '%s'. " + - "Http response code: %d", policyName, conn.getResponseCode())); - } - } - - public Map getPolicies() throws Exception { - // This API gets all policies for all services. The - // /public/v2/api/policies/{serviceDefName}/for-resource endpoint is - // supposed to get policies for only a specified service, but it does not - // seem to work. This implementation should be ok for testing purposes as - // this class is intended. - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_POLICY_HTTP_ENDPOINT; - HttpsURLConnection conn = makeHttpsGetCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to get all policies. 
" + - "Http response code: %d", conn.getResponseCode())); - } - String allPoliciesString = getResponseData(conn); - // Filter out policies not for Ozone service. - JsonArray jsonPoliciesArray = new JsonParser().parse(allPoliciesString) - .getAsJsonArray(); - Map policies = new HashMap<>(); - for (JsonElement jsonPolicy: jsonPoliciesArray) { - JsonObject jsonPolicyObject = jsonPolicy.getAsJsonObject(); - String service = jsonPolicyObject.get("service").getAsString(); - if (service.equals(rangerService)) { - long id = jsonPolicyObject.get("id").getAsLong(); - policies.put(id, jsonConverter.fromJson(jsonPolicyObject, - Policy.class)); - } - } - - return policies; - } - - @Override - public Policy getPolicy(String policyName) throws IOException { - String rangerAdminUrl = getPolicyByNameEndpoint(policyName); - - HttpsURLConnection conn = makeHttpsGetCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to get policy '%s'. " + - "Http response code: %d", policyName, conn.getResponseCode())); - } - String policyInfo = getResponseData(conn); - return jsonConverter.fromJson(policyInfo, Policy.class); - } - - @Override - public List getLabeledPolicies(String label) throws IOException { - throw new NotImplementedException("Not Implemented"); - } - - @Override - public Policy updatePolicy(Policy policy) throws IOException { - throw new NotImplementedException("Not Implemented"); - } - - public void updatePolicy(long policyID, Policy policy) throws IOException { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_POLICY_HTTP_ENDPOINT + policyID; - - HttpsURLConnection conn = makeHttpsPutCall(rangerAdminUrl, - jsonConverter.toJsonTree(policy)); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to update policy %d. " + - "Http response code: %d", policyID, conn.getResponseCode())); - } - } - - @Override - public Role createRole(Role role) throws IOException { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_ROLE_HTTP_ENDPOINT; - - HttpsURLConnection conn = makeHttpsPostCall(rangerAdminUrl, - jsonConverter.toJsonTree(role).getAsJsonObject()); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to create role %s. " + - "Http response code: %d", role.getName(), conn.getResponseCode())); - } - String responseString = getResponseData(conn); - JsonObject jObject = new JsonParser().parse(responseString) - .getAsJsonObject(); -// return jObject.get("id").getAsLong(); - - // TODO: Should reconstruct from response data. - return role; - } - - @Override - public void deleteRole(String roleName) throws IOException { - String rangerAdminUrl = getRoleByNameEndpoint(roleName); - HttpsURLConnection conn = makeHttpsDeleteCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to delete role '%s'. " + - "Http response code: %d", roleName, conn.getResponseCode())); - } - } - - @Override - public long getRangerServicePolicyVersion() throws IOException { - throw new NotImplementedException("Not Implemented"); - } - - public Map getRoles() throws Exception { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_ROLE_HTTP_ENDPOINT; - HttpsURLConnection conn = makeHttpsGetCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to get all roles. 
" + - "Http response code: %d", conn.getResponseCode())); - } - - String allRolesString = getResponseData(conn); - JsonArray rolesArrayJson = - new JsonParser().parse(allRolesString).getAsJsonArray(); - Map roles = new HashMap<>(); - for (JsonElement roleJson: rolesArrayJson) { - long id = roleJson.getAsJsonObject().get("id").getAsLong(); - roles.put(id, jsonConverter.fromJson(roleJson, Role.class)); - } - - return roles; - } - - @Override - public Role getRole(String roleName) throws IOException { - String rangerAdminUrl = getRoleByNameEndpoint(roleName); - - HttpsURLConnection conn = makeHttpsGetCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to get role '%s'. " + - "Http response code: %d", roleName, conn.getResponseCode())); - } - String roleInfo = getResponseData(conn); - return jsonConverter.fromJson(roleInfo, Role.class); - } - - @Override - public Role updateRole(long roleId, Role role) throws IOException { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_ROLE_HTTP_ENDPOINT + roleId; - - HttpsURLConnection conn = makeHttpsPutCall(rangerAdminUrl, - jsonConverter.toJsonTree(role)); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to update role %d. " + - "Http response code: %d", roleId, conn.getResponseCode())); - } - - // TODO: Should reconstruct from response data. - return role; - } - - private HttpsURLConnection makeHttpsPutCall(String url, JsonElement content) - throws IOException { - HttpsURLConnection connection = makeBaseHttpsURLConnection(url); - connection.setRequestMethod("PUT"); - return addJsonContentToConnection(connection, content); - } - - private HttpsURLConnection makeHttpsPostCall(String url, JsonElement content) - throws IOException { - HttpsURLConnection connection = makeBaseHttpsURLConnection(url); - connection.setRequestMethod("POST"); - return addJsonContentToConnection(connection, content); - } - - private HttpsURLConnection addJsonContentToConnection( - HttpsURLConnection connection, JsonElement content) throws IOException { - connection.setDoOutput(true); - connection.setRequestProperty("Content-Type", "application/json;"); - try (OutputStream os = connection.getOutputStream()) { - byte[] input = content.toString().getBytes(StandardCharsets.UTF_8); - os.write(input, 0, input.length); - os.flush(); - } - - return connection; - } - - private HttpsURLConnection makeHttpsGetCall(String urlString) - throws IOException { - HttpsURLConnection connection = makeBaseHttpsURLConnection(urlString); - connection.setRequestMethod("GET"); - return connection; - } - - private HttpsURLConnection makeHttpsDeleteCall(String urlString) - throws IOException { - HttpsURLConnection connection = makeBaseHttpsURLConnection(urlString); - connection.setRequestMethod("DELETE"); - return connection; - } - - private HttpsURLConnection makeBaseHttpsURLConnection(String urlString) - throws IOException { - URL url = new URL(urlString); - HttpsURLConnection urlConnection = (HttpsURLConnection)url.openConnection(); - urlConnection.setConnectTimeout(connectionTimeout); - urlConnection.setReadTimeout(connectionRequestTimeout); - urlConnection.setRequestProperty("Accept", "application/json"); - urlConnection.setRequestProperty("Authorization", authHeaderValue); - - return urlConnection; - } - - private String getResponseData(HttpsURLConnection urlConnection) - throws IOException { - StringBuilder response = new StringBuilder(); - try (BufferedReader br 
= new BufferedReader( - new InputStreamReader( - urlConnection.getInputStream(), StandardCharsets.UTF_8))) { - String responseLine; - while ((responseLine = br.readLine()) != null) { - response.append(responseLine.trim()); - } - } - return response.toString(); - } - - private boolean successfulResponseCode(long responseCode) { - return responseCode >= 200 && responseCode < 300; - } - - /// SERIALIZATION /// - - private final JsonDeserializer policyDeserializer = - new JsonDeserializer() { - @Override public Policy deserialize(JsonElement jsonElement, Type type, - JsonDeserializationContext jsonDeserializationContext) - throws JsonParseException { - JsonObject policyJson = jsonElement.getAsJsonObject(); - String name = policyJson.get("name").getAsString(); - Policy.Builder policyB = new Policy.Builder(); - policyB.setName(name); - if (policyJson.has("description")) { - policyB.setDescription(policyJson.get("description").getAsString()); - } - policyB.setEnabled(policyJson.get("isEnabled").getAsBoolean()); - - // Read volume, bucket, keys from json. - JsonObject resourcesJson = - policyJson.get("resources").getAsJsonObject(); - // All Ozone Ranger policies specify at least a volume. - JsonObject jsonVolumeResource = - resourcesJson.get("volume").getAsJsonObject(); - JsonArray volumes = jsonVolumeResource.get("values").getAsJsonArray(); - volumes.forEach(vol -> policyB.addVolume(vol.getAsString())); - - if (resourcesJson.has("bucket")) { - JsonObject jsonBucketResource = - resourcesJson.get("bucket").getAsJsonObject(); - JsonArray buckets = - jsonBucketResource.get("values").getAsJsonArray(); - buckets.forEach(bucket -> policyB.addBucket(bucket.getAsString())); - } - - if (resourcesJson.has("key")) { - JsonObject jsonKeysResource = - resourcesJson.get("key").getAsJsonObject(); - JsonArray keys = jsonKeysResource.get("values").getAsJsonArray(); - keys.forEach(key -> policyB.addKey(key.getAsString())); - } - - // Read Roles and their ACLs. 
- JsonArray policyItemsJson = policyJson.getAsJsonArray("policyItems"); - for (JsonElement policyItemElement : policyItemsJson) { - JsonObject policyItemJson = policyItemElement.getAsJsonObject(); - JsonArray jsonRoles = policyItemJson.getAsJsonArray("roles"); - JsonArray jsonAclArray = policyItemJson.getAsJsonArray("accesses"); - - for (JsonElement jsonAclElem : jsonAclArray) { - JsonObject jsonAcl = jsonAclElem.getAsJsonObject(); - String aclType = jsonAcl.get("type").getAsString(); - Acl acl; - if (jsonAcl.get("isAllowed").getAsBoolean()) { - acl = Acl.allow(stringToAcl.get(aclType)); - } else { - acl = Acl.deny(stringToAcl.get(aclType)); - } - - for (JsonElement roleNameJson : jsonRoles) { - policyB.addRoleAcl(roleNameJson.getAsString(), - Collections.singleton(acl)); - } - } - } - - return policyB.build(); - } - }; - - private final JsonDeserializer roleDeserializer = - new JsonDeserializer() { - @Override public Role deserialize(JsonElement jsonElement, Type type, - JsonDeserializationContext jsonDeserializationContext) - throws JsonParseException { - JsonObject roleJson = jsonElement.getAsJsonObject(); - String name = roleJson.get("name").getAsString(); - Role.Builder role = new Role.Builder(); - role.setName(name); - if (roleJson.has("description")) { - role.setDescription(roleJson.get("description").getAsString()); - } - for (JsonElement jsonUser : roleJson.get("users").getAsJsonArray()) { - String userName = - jsonUser.getAsJsonObject().get("name").getAsString(); - role.addUser(userName, false); - } - - return role.build(); - } - }; - - private final JsonSerializer policySerializer = - new JsonSerializer() { - @Override public JsonElement serialize(Policy javaPolicy, - Type typeOfSrc, JsonSerializationContext context) { - JsonObject jsonPolicy = new JsonObject(); - jsonPolicy.addProperty("name", javaPolicy.getName()); - jsonPolicy.addProperty("service", rangerService); - jsonPolicy.addProperty("isEnabled", javaPolicy.isEnabled()); - if (javaPolicy.getDescription().isPresent()) { - jsonPolicy.addProperty("description", - javaPolicy.getDescription().get()); - } - - // All resources under this policy are added to this object. - JsonObject jsonResources = new JsonObject(); - - // Add volumes. Ranger requires at least one volume to be specified. - JsonArray jsonVolumeNameArray = new JsonArray(); - for (String volumeName : javaPolicy.getVolumes()) { - jsonVolumeNameArray.add(new JsonPrimitive(volumeName)); - } - JsonObject jsonVolumeResource = new JsonObject(); - jsonVolumeResource.add("values", jsonVolumeNameArray); - jsonVolumeResource.addProperty("isRecursive", false); - jsonVolumeResource.addProperty("isExcludes", false); - jsonResources.add("volume", jsonVolumeResource); - - // Add buckets. - JsonArray jsonBucketNameArray = new JsonArray(); - for (String bucketName : javaPolicy.getBuckets()) { - jsonBucketNameArray.add(new JsonPrimitive(bucketName)); - } - - if (jsonBucketNameArray.size() > 0) { - JsonObject jsonBucketResource = new JsonObject(); - jsonBucketResource.add("values", jsonBucketNameArray); - jsonBucketResource.addProperty("isRecursive", false); - jsonBucketResource.addProperty("isExcludes", false); - jsonResources.add("bucket", jsonBucketResource); - } - - // Add keys. 
- JsonArray jsonKeyNameArray = new JsonArray(); - for (String keyName : javaPolicy.getKeys()) { - jsonKeyNameArray.add(new JsonPrimitive(keyName)); - } - if (jsonKeyNameArray.size() > 0) { - JsonObject jsonKeyResource = new JsonObject(); - jsonKeyResource.add("values", jsonKeyNameArray); - jsonKeyResource.addProperty("isRecursive", false); - jsonKeyResource.addProperty("isExcludes", false); - jsonResources.add("key", jsonKeyResource); - } - - jsonPolicy.add("resources", jsonResources); - - // Add roles and their acls to the policy. - JsonArray jsonPolicyItemArray = new JsonArray(); - - // Make a new policy item for each role in the map. - Map> roleAcls = javaPolicy.getRoleAcls(); - for (Map.Entry> entry : roleAcls.entrySet()) { - // Add role to the policy item. - String roleName = entry.getKey(); - JsonObject jsonPolicyItem = new JsonObject(); - JsonArray jsonRoles = new JsonArray(); - jsonRoles.add(new JsonPrimitive(roleName)); - jsonPolicyItem.add("roles", jsonRoles); - - // Add acls to the policy item. - JsonArray jsonAclArray = new JsonArray(); - for (Acl acl : entry.getValue()) { - JsonObject jsonAcl = new JsonObject(); - jsonAcl.addProperty("type", aclToString.get(acl.getAclType())); - jsonAcl.addProperty("isAllowed", acl.isAllowed()); - jsonAclArray.add(jsonAcl); - jsonPolicyItem.add("accesses", jsonAclArray); - } - jsonPolicyItemArray.add(jsonPolicyItem); - } - jsonPolicy.add("policyItems", jsonPolicyItemArray); - - return jsonPolicy; - } - }; - - private final JsonSerializer roleSerializer = - new JsonSerializer() { - @Override public JsonElement serialize(Role javaRole, Type typeOfSrc, - JsonSerializationContext context) { - JsonObject jsonRole = new JsonObject(); - jsonRole.addProperty("name", javaRole.getName()); - - JsonArray jsonUserArray = new JsonArray(); - for (String javaUser : javaRole.getUsersMap().keySet()) { - jsonUserArray.add(jsonConverter.toJsonTree(javaUser)); - } - - jsonRole.add("users", jsonUserArray); - return jsonRole; - } - }; - - private final JsonSerializer userSerializer = - new JsonSerializer() { - @Override public JsonElement serialize(BasicUserPrincipal user, - Type typeOfSrc, JsonSerializationContext context) { - JsonObject jsonMember = new JsonObject(); - jsonMember.addProperty("name", user.getName()); - jsonMember.addProperty("isAdmin", false); - return jsonMember; - } - }; - - public void setPolicyLastUpdateTime(long mtime) { - lastPolicyUpdateTimeEpochMillis = mtime; - } - - public long getPolicyLastUpdateTime() { - return lastPolicyUpdateTimeEpochMillis; - } - - public HashSet getRoleList() { - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 2c1276c43e73..857005bd9292 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.codec.OMDBDefinition; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; 
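Note on the following OzoneManagerDoubleBuffer hunks: the flush daemon is no longer started in the constructor; a new start() method starts it and returns this, so callers chain it after build() (as buildDoubleBufferForRatis() does later in this diff). A minimal sketch of the resulting lifecycle; the Builder construction and any setters not visible in this diff are assumptions, not part of the change itself:

  // Sketch only: two-step lifecycle after this change (construct first, then explicitly start).
  OzoneManagerDoubleBuffer doubleBuffer = new OzoneManagerDoubleBuffer.Builder()  // construction assumed
      .setS3SecretManager(ozoneManager.getS3SecretManager())  // setters as shown in buildDoubleBufferForRatis()
      .enableRatis(true)
      .enableTracing(isTracingEnabled)
      // other required Builder setters omitted
      .build()    // flush daemon thread is created but not yet running
      .start();   // daemon starts, isRunning is set, and this is returned for chaining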
@@ -211,11 +210,15 @@ private OzoneManagerDoubleBuffer(Builder b) { this.isTracingEnabled = b.isTracingEnabled; - isRunning.set(true); // Daemon thread which runs in background and flushes transactions to DB. daemon = new Daemon(this::flushTransactions); daemon.setName(b.threadPrefix + "OMDoubleBufferFlushThread"); + } + + public OzoneManagerDoubleBuffer start() { daemon.start(); + isRunning.set(true); + return this; } private boolean isRatisEnabled() { @@ -591,29 +594,24 @@ private synchronized void swapCurrentAndReadyBuffer() { readyBuffer = temp; } - @VisibleForTesting OzoneManagerDoubleBufferMetrics getMetrics() { return metrics; } /** @return the flushed transaction count to OM DB. */ - @VisibleForTesting long getFlushedTransactionCountForTesting() { return flushedTransactionCount.get(); } /** @return total number of flush iterations run by sync thread. */ - @VisibleForTesting long getFlushIterationsForTesting() { return flushIterations.get(); } - @VisibleForTesting int getCurrentBufferSize() { return currentBuffer.size(); } - @VisibleForTesting int getReadyBufferSize() { return readyBuffer.size(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java similarity index 95% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java index 351f18528931..afa162cc3ad8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java @@ -16,9 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om.ratis.metrics; - -import com.google.common.annotations.VisibleForTesting; +package org.apache.hadoop.ozone.om.ratis; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; @@ -114,8 +112,7 @@ public void updateFlushTime(long time) { flushTime.add(time); } - @VisibleForTesting - public MutableRate getFlushTime() { + MutableRate getFlushTime() { return flushTime; } @@ -142,8 +139,7 @@ public void updateFlush(int flushedTransactionsInOneIteration) { updateQueueSize(flushedTransactionsInOneIteration); } - @VisibleForTesting - public MutableStat getQueueSize() { + MutableStat getQueueSize() { return queueSize; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 6d7e117ada19..f09efe0d5b21 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -600,15 +600,12 @@ public void stop() { } } - //TODO simplify it to make it shorter - @SuppressWarnings("methodlength") public static RaftProperties newRaftProperties(ConfigurationSource conf, int port, String ratisStorageDir) { // Set RPC type - final String rpcType = conf.get( + final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(conf.get( OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_KEY, - OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_DEFAULT); - final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); + OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_DEFAULT)); final RaftProperties properties = RatisHelper.newRaftProperties(rpc); // Set the ratis port number @@ -619,129 +616,123 @@ public static RaftProperties newRaftProperties(ConfigurationSource conf, } // Set Ratis storage directory - RaftServerConfigKeys.setStorageDir(properties, - Collections.singletonList(new File(ratisStorageDir))); + RaftServerConfigKeys.setStorageDir(properties, Collections.singletonList(new File(ratisStorageDir))); + + final int logAppenderQueueByteLimit = (int) conf.getStorageSize( + OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, + OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); + + // For grpc config + setGrpcConfig(properties, logAppenderQueueByteLimit); + + setRaftLeaderElectionProperties(properties, conf); + + setRaftLogProperties(properties, logAppenderQueueByteLimit, conf); + + setRaftRpcProperties(properties, conf); + + setRaftRetryCacheProperties(properties, conf); + + setRaftSnapshotProperties(properties, conf); + + setRaftCloseThreshold(properties, conf); + + getOMHAConfigs(conf).forEach(properties::set); + return properties; + } + + private static void setRaftLeaderElectionProperties(RaftProperties properties, ConfigurationSource conf) { // Disable/enable the pre vote feature in Ratis - RaftServerConfigKeys.LeaderElection.setPreVote(properties, - conf.getBoolean(OMConfigKeys.OZONE_OM_RATIS_SERVER_ELECTION_PRE_VOTE, - OMConfigKeys.OZONE_OM_RATIS_SERVER_ELECTION_PRE_VOTE_DEFAULT)); + RaftServerConfigKeys.LeaderElection.setPreVote(properties, conf.getBoolean( + OMConfigKeys.OZONE_OM_RATIS_SERVER_ELECTION_PRE_VOTE, + OMConfigKeys.OZONE_OM_RATIS_SERVER_ELECTION_PRE_VOTE_DEFAULT)); + } + private static void setRaftLogProperties(RaftProperties properties, + int logAppenderQueueByteLimit, ConfigurationSource 
conf) { // Set RAFT segment size - final long raftSegmentSize = (long) conf.getStorageSize( + RaftServerConfigKeys.Log.setSegmentSizeMax(properties, SizeInBytes.valueOf((long) conf.getStorageSize( OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY, - OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.setSegmentSizeMax(properties, - SizeInBytes.valueOf(raftSegmentSize)); + OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_DEFAULT, StorageUnit.BYTES))); // Set to enable RAFT to purge logs up to Snapshot Index - RaftServerConfigKeys.Log.setPurgeUptoSnapshotIndex(properties, - conf.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_UPTO_SNAPSHOT_INDEX, - OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_UPTO_SNAPSHOT_INDEX_DEFAULT - ) - ); + RaftServerConfigKeys.Log.setPurgeUptoSnapshotIndex(properties, conf.getBoolean( + OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_UPTO_SNAPSHOT_INDEX, + OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_UPTO_SNAPSHOT_INDEX_DEFAULT)); + // Set number of last RAFT logs to not be purged - RaftServerConfigKeys.Log.setPurgePreservationLogNum(properties, - conf.getLong( - OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_PRESERVATION_LOG_NUM, - OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_PRESERVATION_LOG_NUM_DEFAULT - ) - ); + RaftServerConfigKeys.Log.setPurgePreservationLogNum(properties, conf.getLong( + OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_PRESERVATION_LOG_NUM, + OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_PRESERVATION_LOG_NUM_DEFAULT)); // Set RAFT segment pre-allocated size - final long raftSegmentPreallocatedSize = (long) conf.getStorageSize( + RaftServerConfigKeys.Log.setPreallocatedSize(properties, SizeInBytes.valueOf((long) conf.getStorageSize( OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, - StorageUnit.BYTES); - int logAppenderQueueNumElements = conf.getInt( + OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, StorageUnit.BYTES))); + + // Set RAFT buffer element limit + RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, conf.getInt( OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); - final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, - logAppenderQueueNumElements); - RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit)); - RaftServerConfigKeys.Log.setWriteBufferSize(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit + 8)); - RaftServerConfigKeys.Log.setPreallocatedSize(properties, - SizeInBytes.valueOf(raftSegmentPreallocatedSize)); - RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, - false); - final int logPurgeGap = conf.getInt( + OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT)); + + RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties, SizeInBytes.valueOf(logAppenderQueueByteLimit)); + RaftServerConfigKeys.Log.setWriteBufferSize(properties, SizeInBytes.valueOf(logAppenderQueueByteLimit + 8)); + RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, false); + + RaftServerConfigKeys.Log.setPurgeGap(properties, conf.getInt( OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, - OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT); 
- RaftServerConfigKeys.Log.setPurgeGap(properties, logPurgeGap); + OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT)); + // Set the number of maximum cached segments + RaftServerConfigKeys.Log.setSegmentCacheNumMax(properties, 2); + } + + private static void setGrpcConfig(RaftProperties properties, int logAppenderQueueByteLimit) { // For grpc set the maximum message size // TODO: calculate the optimal max message size - GrpcConfigKeys.setMessageSizeMax(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit)); + GrpcConfigKeys.setMessageSizeMax(properties, SizeInBytes.valueOf(logAppenderQueueByteLimit)); + } + private static void setRaftRpcProperties(RaftProperties properties, ConfigurationSource conf) { // Set the server request timeout - TimeUnit serverRequestTimeoutUnit = - OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT.getUnit(); - long serverRequestTimeoutDuration = conf.getTimeDuration( + TimeUnit serverRequestTimeoutUnit = OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT.getUnit(); + final TimeDuration serverRequestTimeout = TimeDuration.valueOf(conf.getTimeDuration( OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_KEY, - OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT - .getDuration(), serverRequestTimeoutUnit); - final TimeDuration serverRequestTimeout = TimeDuration.valueOf( - serverRequestTimeoutDuration, serverRequestTimeoutUnit); - RaftServerConfigKeys.Rpc.setRequestTimeout(properties, - serverRequestTimeout); - - // Set timeout for server retry cache entry - TimeUnit retryCacheTimeoutUnit = OMConfigKeys - .OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT.getUnit(); - long retryCacheTimeoutDuration = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY, - OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT - .getDuration(), retryCacheTimeoutUnit); - final TimeDuration retryCacheTimeout = TimeDuration.valueOf( - retryCacheTimeoutDuration, retryCacheTimeoutUnit); - RaftServerConfigKeys.RetryCache.setExpiryTime(properties, - retryCacheTimeout); + OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT.getDuration(), serverRequestTimeoutUnit), + serverRequestTimeoutUnit); + RaftServerConfigKeys.Rpc.setRequestTimeout(properties, serverRequestTimeout); // Set the server min and max timeout - TimeUnit serverMinTimeoutUnit = - OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT.getUnit(); - long serverMinTimeoutDuration = conf.getTimeDuration( + TimeUnit serverMinTimeoutUnit = OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT.getUnit(); + final TimeDuration serverMinTimeout = TimeDuration.valueOf(conf.getTimeDuration( OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, - OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT - .getDuration(), serverMinTimeoutUnit); - final TimeDuration serverMinTimeout = TimeDuration.valueOf( - serverMinTimeoutDuration, serverMinTimeoutUnit); - long serverMaxTimeoutDuration = - serverMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200; - final TimeDuration serverMaxTimeout = TimeDuration.valueOf( - serverMaxTimeoutDuration, TimeUnit.MILLISECONDS); - RaftServerConfigKeys.Rpc.setTimeoutMin(properties, - serverMinTimeout); - RaftServerConfigKeys.Rpc.setTimeoutMax(properties, - serverMaxTimeout); - - // Set the number of maximum cached segments - RaftServerConfigKeys.Log.setSegmentCacheNumMax(properties, 2); - - // TODO: set max write buffer size - - TimeUnit nodeFailureTimeoutUnit = - OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT - .getUnit(); - long 
nodeFailureTimeoutDuration = conf.getTimeDuration( + OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT.getDuration(), serverMinTimeoutUnit), + serverMinTimeoutUnit); + final TimeDuration serverMaxTimeout = serverMinTimeout.add(200, TimeUnit.MILLISECONDS); + RaftServerConfigKeys.Rpc.setTimeoutMin(properties, serverMinTimeout); + RaftServerConfigKeys.Rpc.setTimeoutMax(properties, serverMaxTimeout); + + // Set the server Rpc slowness timeout and Notification noLeader timeout + TimeUnit nodeFailureTimeoutUnit = OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT.getUnit(); + final TimeDuration nodeFailureTimeout = TimeDuration.valueOf(conf.getTimeDuration( OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY, - OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT - .getDuration(), nodeFailureTimeoutUnit); - final TimeDuration nodeFailureTimeout = TimeDuration.valueOf( - nodeFailureTimeoutDuration, nodeFailureTimeoutUnit); - RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties, - nodeFailureTimeout); - RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, - nodeFailureTimeout); + OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT.getDuration(), nodeFailureTimeoutUnit), + nodeFailureTimeoutUnit); + RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties, nodeFailureTimeout); + RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, nodeFailureTimeout); + } + private static void setRaftRetryCacheProperties(RaftProperties properties, ConfigurationSource conf) { + // Set timeout for server retry cache entry + TimeUnit retryCacheTimeoutUnit = OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT.getUnit(); + final TimeDuration retryCacheTimeout = TimeDuration.valueOf(conf.getTimeDuration( + OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY, + OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT.getDuration(), retryCacheTimeoutUnit), + retryCacheTimeoutUnit); + RaftServerConfigKeys.RetryCache.setExpiryTime(properties, retryCacheTimeout); + } + + private static void setRaftSnapshotProperties(RaftProperties properties, ConfigurationSource conf) { // Set auto trigger snapshot. We don't need to configure auto trigger // threshold in OM, as last applied index is flushed during double buffer // flush automatically. (But added this property internally, so that this @@ -751,18 +742,22 @@ public static RaftProperties newRaftProperties(ConfigurationSource conf, // The transaction info value in OM DB is used as // snapshot value after restart. 
-    RaftServerConfigKeys.Snapshot.setAutoTriggerEnabled(
-        properties, true);
+    RaftServerConfigKeys.Snapshot.setAutoTriggerEnabled(properties, true);
 
-    long snapshotAutoTriggerThreshold = conf.getLong(
+    RaftServerConfigKeys.Snapshot.setAutoTriggerThreshold(properties, conf.getLong(
         OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY,
-        OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_DEFAULT);
-
-    RaftServerConfigKeys.Snapshot.setAutoTriggerThreshold(properties,
-        snapshotAutoTriggerThreshold);
-
-    getOMHAConfigs(conf).forEach(properties::set);
-    return properties;
+        OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_DEFAULT));
+  }
+
+  private static void setRaftCloseThreshold(RaftProperties properties, ConfigurationSource conf) {
+    // Set RAFT server close threshold
+    TimeUnit closeThresholdUnit = OMConfigKeys.OZONE_OM_RATIS_SERVER_CLOSE_THRESHOLD_DEFAULT.getUnit();
+    final int closeThreshold = (int) TimeDuration.valueOf(conf.getTimeDuration(
+        OMConfigKeys.OZONE_OM_RATIS_SERVER_CLOSE_THRESHOLD_KEY,
+        OMConfigKeys.OZONE_OM_RATIS_SERVER_CLOSE_THRESHOLD_DEFAULT.getDuration(), closeThresholdUnit),
+        closeThresholdUnit).toLong(TimeUnit.SECONDS);
+    // TODO: update to the new API setCloseThreshold(RaftProperties, TimeDuration) if available
+    RaftServerConfigKeys.setCloseThreshold(properties, closeThreshold);
   }
 
   private static Map getOMHAConfigs(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
index 90fcba40f5d0..ada061876e39 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
@@ -88,7 +88,7 @@ public class OzoneManagerStateMachine extends BaseStateMachine {
   private final OzoneManager ozoneManager;
   private RequestHandler handler;
   private RaftGroupId raftGroupId;
-  private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer;
+  private volatile OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer;
   private final ExecutorService executorService;
   private final ExecutorService installSnapshotExecutor;
   private final boolean isTracingEnabled;
@@ -109,9 +109,7 @@ public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer,
     this.threadPrefix = ozoneManager.getThreadNamePrefix();
 
     this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis();
-
-    this.handler = new OzoneManagerRequestHandler(ozoneManager,
-        ozoneManagerDoubleBuffer);
+    this.handler = new OzoneManagerRequestHandler(ozoneManager);
 
     ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
         .setNameFormat(threadPrefix +
@@ -163,12 +161,18 @@ public void notifyLeaderChanged(RaftGroupMemberId groupMemberId,
   /** Notified by Ratis for non-StateMachine term-index update. */
   @Override
   public synchronized void notifyTermIndexUpdated(long currentTerm, long newIndex) {
+    // lastSkippedIndex is one less than the start of the latest continuous sequence of notifications from Ratis.
+    // If there is any applyTransaction (double-buffer index) in between, that gap is handled during the double-buffer
+    // notification, and lastSkippedIndex becomes the start of the last continuous sequence.
     final long oldIndex = lastNotifiedTermIndex.getIndex();
     if (newIndex - oldIndex > 1) {
       lastSkippedIndex = newIndex - 1;
     }
     final TermIndex newTermIndex = TermIndex.valueOf(currentTerm, newIndex);
     lastNotifiedTermIndex = assertUpdateIncreasingly("lastNotified", lastNotifiedTermIndex, newTermIndex);
+    if (lastNotifiedTermIndex.getIndex() - getLastAppliedTermIndex().getIndex() == 1) {
+      updateLastAppliedTermIndex(lastNotifiedTermIndex);
+    }
   }
 
   public TermIndex getLastNotifiedTermIndex() {
@@ -177,7 +181,15 @@ public TermIndex getLastNotifiedTermIndex() {
   @Override
   protected synchronized boolean updateLastAppliedTermIndex(TermIndex newTermIndex) {
-    assertUpdateIncreasingly("lastApplied", getLastAppliedTermIndex(), newTermIndex);
+    TermIndex lastApplied = getLastAppliedTermIndex();
+    assertUpdateIncreasingly("lastApplied", lastApplied, newTermIndex);
+    // If the index being applied falls within the already-notified sequence (i.e. between lastSkippedIndex and
+    // lastNotifiedTermIndex), the applied index can be advanced directly to lastNotifiedTermIndex, because the
+    // preceding double-buffer index matching lastSkippedIndex has already been notified or is being notified.
+    if (newTermIndex.getIndex() < getLastNotifiedTermIndex().getIndex()
+        && newTermIndex.getIndex() >= lastSkippedIndex) {
+      newTermIndex = getLastNotifiedTermIndex();
+    }
     return super.updateLastAppliedTermIndex(newTermIndex);
   }
 
@@ -415,7 +427,6 @@ public synchronized void unpause(long newLastAppliedSnaphsotIndex,
     if (statePausedCount.decrementAndGet() == 0) {
       getLifeCycle().startAndTransition(() -> {
         this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis();
-        handler.updateDoubleBuffer(ozoneManagerDoubleBuffer);
         this.setLastAppliedTermIndex(TermIndex.valueOf(
             newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex));
       });
@@ -434,7 +445,8 @@ public OzoneManagerDoubleBuffer buildDoubleBufferForRatis() {
         .setS3SecretManager(ozoneManager.getS3SecretManager())
         .enableRatis(true)
         .enableTracing(isTracingEnabled)
-        .build();
+        .build()
+        .start();
   }
 
   /**
@@ -524,7 +536,8 @@ public void close() {
    */
   private OMResponse runCommand(OMRequest request, TermIndex termIndex) {
     try {
-      OMClientResponse omClientResponse = handler.handleWriteRequest(request, termIndex);
+      final OMClientResponse omClientResponse = handler.handleWriteRequest(
+          request, termIndex, ozoneManagerDoubleBuffer);
       OMLockDetails omLockDetails = omClientResponse.getOmLockDetails();
       OMResponse omResponse = omClientResponse.getOMResponse();
       if (omLockDetails != null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 3ab65346e7eb..b055a1f92f82 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -79,6 +79,7 @@
 import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotDeleteRequest;
 import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveDeletedKeysRequest;
 import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotPurgeRequest;
+import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotRenameRequest;
 import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest;
 import org.apache.hadoop.ozone.om.request.upgrade.OMCancelPrepareRequest;
 import
org.apache.hadoop.ozone.om.request.upgrade.OMFinalizeUpgradeRequest; @@ -224,6 +225,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new OMSnapshotCreateRequest(omRequest); case DeleteSnapshot: return new OMSnapshotDeleteRequest(omRequest); + case RenameSnapshot: + return new OMSnapshotRenameRequest(omRequest); case SnapshotMoveDeletedKeys: return new OMSnapshotMoveDeletedKeysRequest(omRequest); case SnapshotPurge: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 2698d12f9f89..6c8a66ee7ea7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -42,7 +42,6 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LayoutVersion; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -79,8 +78,7 @@ public abstract class OMClientRequest implements RequestAuditor { private UserGroupInformation userGroupInformation; private InetAddress inetAddress; - private final ThreadLocal omLockDetails = - ThreadLocal.withInitial(OMLockDetails::new); + private final OMLockDetails omLockDetails = new OMLockDetails(); /** * Stores the result of request execution in @@ -95,7 +93,7 @@ public enum Result { public OMClientRequest(OMRequest omRequest) { Preconditions.checkNotNull(omRequest); this.omRequest = omRequest; - this.omLockDetails.get().clear(); + this.omLockDetails.clear(); } /** * Perform pre-execute steps on a OMRequest. 
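Note on the OMClientRequest hunks around this point: omLockDetails is now a plain per-request field instead of a ThreadLocal, so lock details belong to the request instance rather than to whichever thread happens to touch it. A small sketch of the resulting contract, using only the accessors shown in this diff; the helper method itself is illustrative and not part of the change:

  // Sketch only: lock details are carried by the request object, not by a ThreadLocal.
  static OMLockDetails mergeAndGet(OMClientRequest request, OMLockDetails acquired) {
    request.mergeOmLockDetails(acquired);   // merged into the request's own OMLockDetails instance
    return request.getOmLockDetails();      // same instance regardless of the calling thread
  }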
@@ -296,7 +294,7 @@ protected void checkACLsWithFSO(OzoneManager ozoneManager, String volumeName, contextBuilder.setOwnerName(bucketOwner); } - try (ReferenceCounted rcMetadataReader = + try (ReferenceCounted rcMetadataReader = ozoneManager.getOmMetadataReader()) { OmMetadataReader omMetadataReader = (OmMetadataReader) rcMetadataReader.get(); @@ -362,7 +360,7 @@ public void checkAcls(OzoneManager ozoneManager, String bucketOwner) throws IOException { - try (ReferenceCounted rcMetadataReader = + try (ReferenceCounted rcMetadataReader = ozoneManager.getOmMetadataReader()) { OzoneAclUtils.checkAllAcls((OmMetadataReader) rcMetadataReader.get(), resType, storeType, aclType, @@ -576,10 +574,10 @@ public static String isValidKeyPath(String path) throws OMException { } public OMLockDetails getOmLockDetails() { - return omLockDetails.get(); + return omLockDetails; } public void mergeOmLockDetails(OMLockDetails details) { - omLockDetails.get().merge(details); + omLockDetails.merge(details); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java index 93b7c92902b6..78e67bb8ed5c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -22,6 +22,8 @@ import java.util.LinkedHashMap; import java.util.Map; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditAction; import org.apache.hadoop.ozone.audit.AuditMessage; @@ -30,6 +32,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .UserInfo; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; + /** * Interface for OM Requests to convert to audit objects. */ @@ -68,10 +72,21 @@ default Map buildKeyArgsAuditMap(KeyArgs keyArgs) { auditMap.put(OzoneConsts.KEY, keyArgs.getKeyName()); auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(keyArgs.getDataSize())); - auditMap.put(OzoneConsts.REPLICATION_TYPE, - (keyArgs.getType() != null) ? keyArgs.getType().name() : null); - auditMap.put(OzoneConsts.REPLICATION_FACTOR, - (keyArgs.getFactor() != null) ? 
keyArgs.getFactor().name() : null); + if (keyArgs.hasType()) { + auditMap.put(OzoneConsts.REPLICATION_TYPE, keyArgs.getType().name()); + } + if (keyArgs.hasFactor() && keyArgs.getFactor() != HddsProtos.ReplicationFactor.ZERO) { + auditMap.put(OzoneConsts.REPLICATION_FACTOR, keyArgs.getFactor().name()); + } + if (keyArgs.hasEcReplicationConfig()) { + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + ECReplicationConfig.toString(keyArgs.getEcReplicationConfig())); + } + for (HddsProtos.KeyValue item : keyArgs.getMetadataList()) { + if (ETAG.equals(item.getKey())) { + auditMap.put(ETAG, item.getValue()); + } + } return auditMap; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 7cce3ac456f9..f2c343e0d161 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -67,6 +67,7 @@ import java.util.ArrayList; import java.util.List; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -330,7 +331,7 @@ private void addDefaultAcls(OmBucketInfo omBucketInfo, // Add default acls from volume. List defaultVolumeAcls = omVolumeArgs.getDefaultAcls(); - OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAcls); + OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAcls, ACCESS); omBucketInfo.setAcls(acls); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index f24dee8ae650..6071110a315d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -369,6 +369,7 @@ private static OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName, .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(dirName) + .setOwnerName(keyArgs.getOwnerName()) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>()))) .setCreationTime(keyArgs.getModificationTime()) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 53fda0f8f06f..14c140e95778 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -308,6 +308,7 @@ private static OmDirectoryInfo createDirectoryInfoWithACL( return OmDirectoryInfo.newBuilder() .setName(dirName) + .setOwner(keyArgs.getOwnerName()) .setCreationTime(keyArgs.getModificationTime()) 
.setModificationTime(keyArgs.getModificationTime()) .setObjectID(objectId) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index d4bc91dbfdf6..9b9fb4e7cc5c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -154,10 +154,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .map(info -> info.getProtobuf(getOmRequest().getVersion())) .collect(Collectors.toList())); + generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager); + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), ozoneManager, IAccessAuthorizer.ACLType.CREATE); - - generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager); CreateFileRequest.Builder newCreateFileRequest = createFileRequest.toBuilder().setKeyArgs(resolvedArgs) .setClientID(UniqueId.next()); @@ -255,6 +255,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ozoneManager.getPrefixManager(), omBucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(omBucketInfo, keyArgs); long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); long clientID = createFileRequest.getClientID(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index 393be170a5b4..8fe6d3381bc9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -172,6 +172,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); long openVersion = omFileInfo.getLatestVersionLocations().getVersion(); long clientID = createFileRequest.getClientID(); @@ -200,7 +201,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Even if bucket gets deleted, when commitKey we shall identify if // bucket gets deleted. OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, - dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(), + dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(), keyName, trxnLogIndex); // Add cache entries for the prefix directories. 
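The `RequestAuditor#buildKeyArgsAuditMap` hunk above now records replication details and the ETag in the audit map only when they are present in the request, instead of writing literal nulls. A proto-free sketch of that logic (illustrative only, not code from this patch), with plain parameters standing in for the `KeyArgs` accessors and string literals assumed in place of the `OzoneConsts` keys:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch: conditional audit-map population without the protobuf types.
final class KeyAuditMapSketch {
  static Map<String, String> replicationEntries(String type, String factor,
      String ecConfig, Map<String, String> keyMetadata) {
    Map<String, String> auditMap = new LinkedHashMap<>();
    if (type != null) {                              // proto: keyArgs.hasType()
      auditMap.put("replicationType", type);
    }
    if (factor != null && !"ZERO".equals(factor)) {  // skip the ZERO placeholder factor
      auditMap.put("replicationFactor", factor);
    }
    if (ecConfig != null) {                          // proto: keyArgs.hasEcReplicationConfig()
      auditMap.put("replicationConfig", ecConfig);
    }
    String etag = keyMetadata.get("ETag");           // only the ETag metadata entry is audited
    if (etag != null) {
      auditMap.put("ETag", etag);
    }
    return auditMap;
  }
}
```

The practical effect is that audit entries for keys without an explicit replication setting no longer carry null values.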
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index a1e660691cd4..3e7549b176e2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -472,15 +472,17 @@ public static void addDirectoryTableCacheEntries( */ public static void addOpenFileTableCacheEntry( OMMetadataManager omMetadataManager, String dbOpenFileName, - @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) { + @Nullable OmKeyInfo omFileInfo, String fileName, String keyName, long trxnLogIndex) { final Table table = omMetadataManager.getOpenKeyTable( BucketLayout.FILE_SYSTEM_OPTIMIZED); if (omFileInfo != null) { - // New key format for the openFileTable. // For example, the user given key path is '/a/b/c/d/e/file1', then in DB - // keyName field stores only the leaf node name, which is 'file1'. - omFileInfo.setKeyName(fileName); + // keyName field stores the full path, which is '/a/b/c/d/e/file1'. + // This is required because in some cases, such as hsync, keys in the openKeyTable are used for auto-commit after expiry. + // (Full key path is required in commit key request) + omFileInfo.setKeyName(keyName); + // fileName contains only the leaf ('file1'), which is the actual file name. + omFileInfo.setFileName(fileName); table.addCacheEntry(dbOpenFileName, omFileInfo, trxnLogIndex); } else { @@ -690,7 +692,7 @@ public static OzoneFileStatus getOMKeyInfoIfExists( if (omDirInfo != null) { lastKnownParentId = omDirInfo.getObjectID(); - } else if (!elements.hasNext() && + } else if (!elements.hasNext() && (!keyName.endsWith(PATH_SEPARATOR_STR))) { // If the requested keyName contains "/" at the end then we need to // just check the directory table. 
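The `addOpenFileTableCacheEntry` change above stores the full key path in `keyName`, since hsync'ed keys may be auto-committed after expiry and the commit request needs the full path, while `fileName` still carries only the leaf. A small illustration (not code from this patch), with a stub class standing in for `OmKeyInfo`:

```java
// Illustrative sketch only; Entry is a stand-in for OmKeyInfo.
final class OpenFileEntrySketch {
  static final class Entry {
    String keyName;   // after this change: the full path, e.g. "a/b/c/d/e/file1"
    String fileName;  // as before: only the leaf, e.g. "file1"
  }

  static Entry forOpenKey(String fullKeyPath) {
    Entry entry = new Entry();
    entry.keyName = fullKeyPath;  // auto-commit after lease expiry needs the full path
    entry.fileName = fullKeyPath.substring(fullKeyPath.lastIndexOf('/') + 1);  // leaf only
    return entry;
  }
}
```

This `keyName`/`fileName` split is also why the extra `keyName` argument is threaded through the allocate-block and key/file-create requests elsewhere in this patch.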
@@ -751,6 +753,7 @@ public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, .getInstance(HddsProtos.ReplicationFactor.ONE)) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>()))) + .setOwnerName(dirInfo.getOwner()) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java index 798fed7dccf2..be12886a689f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java @@ -221,6 +221,10 @@ private RecoverLeaseResponse doWork(OzoneManager ozoneManager, throw new OMException("Open Key " + dbOpenFileKey + " not found in openKeyTable", KEY_NOT_FOUND); } + if (openKeyInfo.getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY)) { + throw new OMException("Open Key " + keyName + " is already deleted", + KEY_NOT_FOUND); + } long openKeyModificationTime = openKeyInfo.getModificationTime(); if (openKeyInfo.getMetadata().containsKey(OzoneConsts.LEASE_RECOVERY)) { LOG.debug("Key: " + keyName + " is already under recovery"); @@ -248,34 +252,44 @@ private RecoverLeaseResponse doWork(OzoneManager ozoneManager, List openKeyLocationInfoList = openKeyLatestVersionLocations.getLocationList(); OmKeyLocationInfo finalBlock = null; + OmKeyLocationInfo penultimateBlock = null; boolean returnKeyInfo = true; if (openKeyLocationInfoList.size() > keyLocationInfoList.size() && openKeyModificationTime > keyInfo.getModificationTime() && openKeyLocationInfoList.size() > 0) { finalBlock = openKeyLocationInfoList.get(openKeyLocationInfoList.size() - 1); + if (openKeyLocationInfoList.size() > 1) { + penultimateBlock = openKeyLocationInfoList.get(openKeyLocationInfoList.size() - 2); + } returnKeyInfo = false; } else if (keyLocationInfoList.size() > 0) { finalBlock = keyLocationInfoList.get(keyLocationInfoList.size() - 1); } - if (finalBlock != null) { + updateBlockInfo(ozoneManager, finalBlock); + updateBlockInfo(ozoneManager, penultimateBlock); + + RecoverLeaseResponse.Builder rb = RecoverLeaseResponse.newBuilder(); + rb.setKeyInfo(returnKeyInfo ? 
keyInfo.getNetworkProtobuf(getOmRequest().getVersion(), true) : + openKeyInfo.getNetworkProtobuf(getOmRequest().getVersion(), true)); + rb.setIsKeyInfo(returnKeyInfo); + + return rb.build(); + } + + private void updateBlockInfo(OzoneManager ozoneManager, OmKeyLocationInfo blockInfo) throws IOException { + if (blockInfo != null) { // set token to last block if enabled if (ozoneManager.isGrpcBlockTokenEnabled()) { String remoteUser = getRemoteUser().getShortUserName(); OzoneBlockTokenSecretManager secretManager = ozoneManager.getBlockTokenSecretManager(); - finalBlock.setToken(secretManager.generateToken(remoteUser, finalBlock.getBlockID(), - EnumSet.of(READ, WRITE), finalBlock.getLength())); + blockInfo.setToken(secretManager.generateToken(remoteUser, blockInfo.getBlockID(), + EnumSet.of(READ, WRITE), blockInfo.getLength())); } // refresh last block pipeline ContainerWithPipeline containerWithPipeline = - ozoneManager.getScmClient().getContainerClient().getContainerWithPipeline(finalBlock.getContainerID()); - finalBlock.setPipeline(containerWithPipeline.getPipeline()); + ozoneManager.getScmClient().getContainerClient().getContainerWithPipeline(blockInfo.getContainerID()); + blockInfo.setPipeline(containerWithPipeline.getPipeline()); } - - RecoverLeaseResponse.Builder rb = RecoverLeaseResponse.newBuilder(); - rb.setKeyInfo(returnKeyInfo ? keyInfo.getNetworkProtobuf(getOmRequest().getVersion(), true) : - openKeyInfo.getNetworkProtobuf(getOmRequest().getVersion(), true)); - - return rb.build(); } private OmKeyInfo getKey(String dbOzoneKey) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index d47b2b7a57e2..9edb07f1a14b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -208,6 +208,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn throw new OMException("Open Key " + openKeyName + " is under lease recovery", KEY_UNDER_LEASE_RECOVERY); } + if (openKeyInfo.getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY) || + openKeyInfo.getMetadata().containsKey(OzoneConsts.OVERWRITTEN_HSYNC_KEY)) { + throw new OMException("Open Key " + openKeyName + " is already deleted/overwritten", + KEY_NOT_FOUND); + } List newLocationList = Collections.singletonList( OmKeyLocationInfo.getFromProtobuf(blockLocation)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java index 4f0c9fe60248..59748696ac7f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java @@ -130,6 +130,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn throw new OMException("Open Key " + openKeyName + " is under lease recovery", KEY_UNDER_LEASE_RECOVERY); } + if (openKeyInfo.getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY) || + openKeyInfo.getMetadata().containsKey(OzoneConsts.OVERWRITTEN_HSYNC_KEY)) { + 
throw new OMException("Open Key " + openKeyName + " is already deleted/overwritten", + KEY_NOT_FOUND); + } List newLocationList = Collections.singletonList( OmKeyLocationInfo.getFromProtobuf(blockLocation)); @@ -160,7 +165,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn openKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); // Add to cache. - addOpenTableCacheEntry(trxnLogIndex, omMetadataManager, openKeyName, + addOpenTableCacheEntry(trxnLogIndex, omMetadataManager, openKeyName, keyName, openKeyInfo); omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder() @@ -215,11 +220,11 @@ private String getOpenKeyName(String volumeName, String bucketName, } private void addOpenTableCacheEntry(long trxnLogIndex, - OMMetadataManager omMetadataManager, String openKeyName, + OMMetadataManager omMetadataManager, String openKeyName, String keyName, OmKeyInfo openKeyInfo) { String fileName = openKeyInfo.getFileName(); OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, openKeyName, - openKeyInfo, fileName, trxnLogIndex); + openKeyInfo, fileName, keyName, trxnLogIndex); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index 505b6287307f..d6935ed683cf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -21,9 +21,11 @@ import java.io.IOException; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -39,8 +41,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import java.util.List; - +import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; /** @@ -66,6 +67,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn Set> lockSet = new HashSet<>(); Map, OmBucketInfo> volBucketInfoMap = new HashMap<>(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + Map openKeyInfoMap = new HashMap<>(); OMMetrics omMetrics = ozoneManager.getMetrics(); try { @@ -110,6 +112,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn volumeName, bucketName); lockSet.add(volBucketPair); } + + // If omKeyInfo has hsync metadata, delete its corresponding open key as well + String dbOpenKey; + String hsyncClientId = keyInfo.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID); + if (hsyncClientId != null) { + long parentId = keyInfo.getParentObjectID(); + dbOpenKey = omMetadataManager.getOpenFileName(path.getVolumeId(), path.getBucketId(), + parentId, keyInfo.getFileName(), hsyncClientId); + OmKeyInfo openKeyInfo = omMetadataManager.getOpenKeyTable(getBucketLayout()).get(dbOpenKey); + if (openKeyInfo != null) { + 
openKeyInfo.getMetadata().put(DELETED_HSYNC_KEY, "true"); + openKeyInfoMap.put(dbOpenKey, openKeyInfo); + } + } + omMetrics.decNumKeys(); OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); @@ -142,7 +159,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn getOmRequest()); OMClientResponse omClientResponse = new OMDirectoriesPurgeResponseWithFSO( omResponse.build(), purgeRequests, ozoneManager.isRatisEnabled(), - getBucketLayout(), volBucketInfoMap, fromSnapshotInfo); + getBucketLayout(), volBucketInfoMap, fromSnapshotInfo, openKeyInfoMap); return omClientResponse; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index d182e4f6c3dc..94cd63f9baa5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -221,29 +221,35 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // creation and key commit, old versions will be just overwritten and // not kept. Bucket versioning will be effective from the first key // creation after the knob turned on. - boolean isPreviousCommitHsync = false; - Map oldKeyVersionsToDeleteMap = null; OmKeyInfo keyToDelete = omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); long writerClientId = commitKeyRequest.getClientID(); - if (isRecovery && keyToDelete != null) { - String clientId = keyToDelete.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID); - if (clientId == null) { - throw new OMException("Failed to recovery key, as " + - dbOzoneKey + " is already closed", KEY_ALREADY_CLOSED); - } - writerClientId = Long.parseLong(clientId); - } - + boolean isSameHsyncKey = false; + boolean isOverwrittenHsyncKey = false; final String clientIdString = String.valueOf(writerClientId); if (null != keyToDelete) { - isPreviousCommitHsync = java.util.Optional.of(keyToDelete) + isSameHsyncKey = java.util.Optional.of(keyToDelete) .map(WithMetadata::getMetadata) .map(meta -> meta.get(OzoneConsts.HSYNC_CLIENT_ID)) .filter(id -> id.equals(clientIdString)) .isPresent(); + if (!isSameHsyncKey) { + isOverwrittenHsyncKey = java.util.Optional.of(keyToDelete) + .map(WithMetadata::getMetadata) + .map(meta -> meta.get(OzoneConsts.HSYNC_CLIENT_ID)) + .filter(id -> !id.equals(clientIdString)) + .isPresent() && !isRecovery; + } } + if (isRecovery && keyToDelete != null) { + String clientId = keyToDelete.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID); + if (clientId == null) { + throw new OMException("Failed to recovery key, as " + + dbOzoneKey + " is already closed", KEY_ALREADY_CLOSED); + } + writerClientId = Long.parseLong(clientId); + } String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, writerClientId); omKeyInfo = @@ -252,7 +258,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String action = isRecovery ? "recovery" : isHSync ? 
"hsync" : "commit"; throw new OMException("Failed to " + action + " key, as " + dbOpenKey + " entry is not found in the OpenKey table", KEY_NOT_FOUND); + } else if (omKeyInfo.getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY) || + omKeyInfo.getMetadata().containsKey(OzoneConsts.OVERWRITTEN_HSYNC_KEY)) { + throw new OMException("Open Key " + keyName + " is already deleted/overwritten", + KEY_NOT_FOUND); } + if (omKeyInfo.getMetadata().containsKey(OzoneConsts.LEASE_RECOVERY) && omKeyInfo.getMetadata().containsKey(OzoneConsts.HSYNC_CLIENT_ID)) { if (!isRecovery) { @@ -261,8 +272,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } - omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime()); + OmKeyInfo openKeyToDelete = null; + String dbOpenKeyToDeleteKey = null; + if (isOverwrittenHsyncKey) { + // find the overwritten openKey and add OVERWRITTEN_HSYNC_KEY to it. + dbOpenKeyToDeleteKey = omMetadataManager.getOpenKey(volumeName, bucketName, + keyName, Long.parseLong(keyToDelete.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID))); + openKeyToDelete = omMetadataManager.getOpenKeyTable(getBucketLayout()).get(dbOpenKeyToDeleteKey); + openKeyToDelete.getMetadata().put(OzoneConsts.OVERWRITTEN_HSYNC_KEY, "true"); + openKeyToDelete.setModificationTime(Time.now()); + openKeyToDelete.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry( + dbOpenKeyToDeleteKey, openKeyToDelete, trxnLogIndex); + } + omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime()); // non-null indicates it is necessary to update the open key OmKeyInfo newOpenKeyInfo = null; @@ -286,10 +310,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Set the UpdateID to current transactionLogIndex omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + Map oldKeyVersionsToDeleteMap = null; long correctedSpace = omKeyInfo.getReplicatedSize(); // if keyToDelete isn't null, usedNamespace needn't check and // increase. - if (keyToDelete != null && (isHSync || isPreviousCommitHsync)) { + if (keyToDelete != null && (isSameHsyncKey)) { correctedSpace -= keyToDelete.getReplicatedSize(); checkBucketQuotaInBytes(omMetadataManager, omBucketInfo, correctedSpace); @@ -300,8 +325,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyToDelete, trxnLogIndex, ozoneManager.isRatisEnabled()); checkBucketQuotaInBytes(omMetadataManager, omBucketInfo, correctedSpace); + // using pseudoObjId as objectId can be same in case of overwrite key + long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); String delKeyName = omMetadataManager.getOzoneDeletePathKey( - keyToDelete.getObjectID(), dbOzoneKey); + pseudoObjId, dbOzoneKey); if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } @@ -333,8 +360,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.put(delKeyName, - new RepeatedOmKeyInfo(pseudoKeyInfo)); + oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName, + key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo); } // Add to cache of open key table and key table. 
@@ -363,7 +390,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omClientResponse = new OMKeyCommitResponse(omResponse.build(), omKeyInfo, dbOzoneKey, dbOpenKey, omBucketInfo.copyObject(), - oldKeyVersionsToDeleteMap, isHSync, newOpenKeyInfo); + oldKeyVersionsToDeleteMap, isHSync, newOpenKeyInfo, dbOpenKeyToDeleteKey, openKeyToDelete); result = Result.SUCCESS; } catch (IOException | InvalidPathException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index f6f8f8b9cb3b..ce0fdbc742af 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -22,7 +22,9 @@ import java.util.HashMap; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.ozone.om.helpers.WithMetadata; import org.apache.hadoop.ozone.om.request.util.OmKeyHSyncUtil; +import org.apache.hadoop.util.Time; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -38,7 +40,6 @@ import org.apache.hadoop.ozone.om.helpers.OmFSOFile; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.WithMetadata; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -147,6 +148,24 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OmKeyInfo keyToDelete = omMetadataManager.getKeyTable(getBucketLayout()).get(dbFileKey); long writerClientId = commitKeyRequest.getClientID(); + boolean isSameHsyncKey = false; + boolean isOverwrittenHsyncKey = false; + final String clientIdString = String.valueOf(writerClientId); + if (null != keyToDelete) { + isSameHsyncKey = java.util.Optional.of(keyToDelete) + .map(WithMetadata::getMetadata) + .map(meta -> meta.get(OzoneConsts.HSYNC_CLIENT_ID)) + .filter(id -> id.equals(clientIdString)) + .isPresent(); + if (!isSameHsyncKey) { + isOverwrittenHsyncKey = java.util.Optional.of(keyToDelete) + .map(WithMetadata::getMetadata) + .map(meta -> meta.get(OzoneConsts.HSYNC_CLIENT_ID)) + .filter(id -> !id.equals(clientIdString)) + .isPresent() && !isRecovery; + } + } + if (isRecovery && keyToDelete != null) { String clientId = keyToDelete.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID); if (clientId == null) { @@ -162,6 +181,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String action = isRecovery ? "recovery" : isHSync ? 
"hsync" : "commit"; throw new OMException("Failed to " + action + " key, as " + dbOpenFileKey + " entry is not found in the OpenKey table", KEY_NOT_FOUND); + } else if (omKeyInfo.getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY) || + omKeyInfo.getMetadata().containsKey(OzoneConsts.OVERWRITTEN_HSYNC_KEY)) { + throw new OMException("Open Key " + keyName + " is already deleted/overwritten", + KEY_NOT_FOUND); } if (omKeyInfo.getMetadata().containsKey(OzoneConsts.LEASE_RECOVERY) && @@ -172,9 +195,22 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } - omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime()); + OmKeyInfo openKeyToDelete = null; + String dbOpenKeyToDeleteKey = null; + if (isOverwrittenHsyncKey) { + // find the overwritten openKey and add OVERWRITTEN_HSYNC_KEY to it. + dbOpenKeyToDeleteKey = fsoFile.getOpenFileName( + Long.parseLong(keyToDelete.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID))); + openKeyToDelete = OMFileRequest.getOmKeyInfoFromFileTable(true, + omMetadataManager, dbOpenKeyToDeleteKey, keyName); + openKeyToDelete.getMetadata().put(OzoneConsts.OVERWRITTEN_HSYNC_KEY, "true"); + openKeyToDelete.setModificationTime(Time.now()); + openKeyToDelete.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, + dbOpenKeyToDeleteKey, openKeyToDelete, keyName, fileName, trxnLogIndex); + } - final String clientIdString = String.valueOf(writerClientId); + omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime()); // non-null indicates it is necessary to update the open key OmKeyInfo newOpenKeyInfo = null; @@ -200,21 +236,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // creation and key commit, old versions will be just overwritten and // not kept. Bucket versioning will be effective from the first key // creation after the knob turned on. - boolean isPreviousCommitHsync = false; Map oldKeyVersionsToDeleteMap = null; - if (null != keyToDelete) { - isPreviousCommitHsync = java.util.Optional.of(keyToDelete) - .map(WithMetadata::getMetadata) - .map(meta -> meta.get(OzoneConsts.HSYNC_CLIENT_ID)) - .filter(id -> id.equals(clientIdString)) - .isPresent(); - } - long correctedSpace = omKeyInfo.getReplicatedSize(); - - // if keyToDelete isn't null, usedNamespace shouldn't check and - // increase. - if (keyToDelete != null && (isHSync || isPreviousCommitHsync)) { + // if keyToDelete isn't null, usedNamespace shouldn't check and increase. 
+ if (keyToDelete != null && isSameHsyncKey) { correctedSpace -= keyToDelete.getReplicatedSize(); checkBucketQuotaInBytes(omMetadataManager, omBucketInfo, correctedSpace); @@ -227,8 +252,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn correctedSpace); String delKeyName = omMetadataManager .getOzoneKey(volumeName, bucketName, fileName); + // using pseudoObjId as objectId can be same in case of overwrite key + long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); delKeyName = omMetadataManager.getOzoneDeletePathKey( - keyToDelete.getObjectID(), delKeyName); + pseudoObjId, delKeyName); if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } @@ -262,8 +289,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.put(delKeyName, - new RepeatedOmKeyInfo(pseudoKeyInfo)); + oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName, + key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo); } // Add to cache of open key table and key table. @@ -272,7 +299,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // indicating the key is removed from OpenKeyTable. // So that this key can't be committed again. OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, - dbOpenFileKey, null, fileName, trxnLogIndex); + dbOpenFileKey, null, fileName, keyName, trxnLogIndex); // Prevent hsync metadata from getting committed to the final key omKeyInfo.getMetadata().remove(OzoneConsts.HSYNC_CLIENT_ID); @@ -282,7 +309,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } else if (newOpenKeyInfo != null) { // isHSync is true and newOpenKeyInfo is set, update OpenKeyTable OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, - dbOpenFileKey, newOpenKeyInfo, fileName, trxnLogIndex); + dbOpenFileKey, newOpenKeyInfo, fileName, keyName, trxnLogIndex); } OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey, @@ -292,7 +319,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omClientResponse = new OMKeyCommitResponseWithFSO(omResponse.build(), omKeyInfo, dbFileKey, dbOpenFileKey, omBucketInfo.copyObject(), - oldKeyVersionsToDeleteMap, volumeId, isHSync, newOpenKeyInfo); + oldKeyVersionsToDeleteMap, volumeId, isHSync, newOpenKeyInfo, dbOpenKeyToDeleteKey, openKeyToDelete); result = Result.SUCCESS; } catch (IOException | InvalidPathException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 48805d6e4e5a..e9a9f007197a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -281,6 +281,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ozoneManager.getObjectIdFromTxId(trxnLogIndex), ozoneManager.isRatisEnabled(), replicationConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); + long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); long clientID = createKeyRequest.getClientID(); String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName, diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index 0dec9fa459f6..07f173bfd8cf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -157,6 +157,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn pathInfoFSO.getLeafNodeObjectId(), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); + long openVersion = omFileInfo.getLatestVersionLocations().getVersion(); long clientID = createKeyRequest.getClientID(); String dbOpenFileName = omMetadataManager @@ -184,7 +186,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Even if bucket gets deleted, when commitKey we shall identify if // bucket gets deleted. OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, - dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(), + dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(), keyName, trxnLogIndex); // Add cache entries for the prefix directories. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 61e5976f8052..14dc82424543 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -161,6 +162,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn long quotaReleased = sumBlockLengths(omKeyInfo); omBucketInfo.incrUsedBytes(-quotaReleased); omBucketInfo.incrUsedNamespace(-1L); + OmKeyInfo deletedOpenKeyInfo = null; // If omKeyInfo has hsync metadata, delete its corresponding open key as well String dbOpenKey = null; @@ -170,8 +172,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, hsyncClientId); OmKeyInfo openKeyInfo = openKeyTable.get(dbOpenKey); if (openKeyInfo != null) { - // Remove the open key by putting a tombstone entry - openKeyTable.addCacheEntry(dbOpenKey, trxnLogIndex); + openKeyInfo.getMetadata().put(DELETED_HSYNC_KEY, "true"); + openKeyTable.addCacheEntry(dbOpenKey, openKeyInfo, trxnLogIndex); + deletedOpenKeyInfo = openKeyInfo; } else { LOG.warn("Potentially inconsistent DB state: open key not found with dbOpenKey '{}'", dbOpenKey); } @@ -180,7 +183,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omClientResponse = new OMKeyDeleteResponse( omResponse.setDeleteKeyResponse(DeleteKeyResponse.newBuilder()) .build(), omKeyInfo, ozoneManager.isRatisEnabled(), - omBucketInfo.copyObject(), dbOpenKey); + 
omBucketInfo.copyObject(), deletedOpenKeyInfo); result = Result.SUCCESS; } catch (IOException | InvalidPathException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java index a0b2cfcbb154..b8b3efbc24e5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java @@ -51,6 +51,7 @@ import java.nio.file.InvalidPathException; import java.util.Map; +import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -129,6 +130,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String ozonePathKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + OmKeyInfo deletedOpenKeyInfo = null; if (keyStatus.isDirectory()) { // Check if there are any sub path exists under the user requested path @@ -165,8 +167,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn dbOpenKey = omMetadataManager.getOpenFileName(volumeId, bucketId, parentId, fileName, hsyncClientId); OmKeyInfo openKeyInfo = openKeyTable.get(dbOpenKey); if (openKeyInfo != null) { - // Remove the open key by putting a tombstone entry - openKeyTable.addCacheEntry(dbOpenKey, trxnLogIndex); + openKeyInfo.getMetadata().put(DELETED_HSYNC_KEY, "true"); + openKeyTable.addCacheEntry(dbOpenKey, openKeyInfo, trxnLogIndex); + deletedOpenKeyInfo = openKeyInfo; } else { LOG.warn("Potentially inconsistent DB state: open key not found with dbOpenKey '{}'", dbOpenKey); } @@ -175,7 +178,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omClientResponse = new OMKeyDeleteResponseWithFSO(omResponse .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(), keyName, omKeyInfo, ozoneManager.isRatisEnabled(), - omBucketInfo.copyObject(), keyStatus.isDirectory(), volumeId, dbOpenKey); + omBucketInfo.copyObject(), keyStatus.isDirectory(), volumeId, deletedOpenKeyInfo); result = Result.SUCCESS; } catch (IOException | InvalidPathException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index d7cdd3632005..0d30c9e55d36 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -92,10 +92,13 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; import static 
org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -343,7 +346,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // Add all acls from direct parent to key. OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); if (prefixInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) { + if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls(), ACCESS)) { return acls; } } @@ -353,7 +356,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // Inherit DEFAULT acls from parent-dir only if DEFAULT acls for // prefix are not set if (omPathInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls())) { + if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls(), ACCESS)) { return acls; } } @@ -361,7 +364,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // Inherit DEFAULT acls from bucket only if DEFAULT acls for // parent-dir are not set. if (bucketInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { + if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), ACCESS)) { return acls; } } @@ -383,17 +386,13 @@ protected static List getAclsForDir(KeyArgs keyArgs, // Inherit DEFAULT acls from parent-dir if (omPathInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls())) { - OzoneAclUtil.toDefaultScope(acls); - } + OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls(), DEFAULT); } // Inherit DEFAULT acls from bucket only if DEFAULT acls for // parent-dir are not set. if (acls.isEmpty() && bucketInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { - OzoneAclUtil.toDefaultScope(acls); - } + OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), DEFAULT); } // add itself acls @@ -591,9 +590,14 @@ protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs, omMetadataManager.getOpenKeyTable(getBucketLayout()) .get(dbMultipartOpenKey); - if (omKeyInfo != null && omKeyInfo.getFileEncryptionInfo() != null) { - newKeyArgs.setFileEncryptionInfo( - OMPBHelper.convert(omKeyInfo.getFileEncryptionInfo())); + if (omKeyInfo != null) { + if (omKeyInfo.getFileEncryptionInfo() != null) { + newKeyArgs.setFileEncryptionInfo( + OMPBHelper.convert(omKeyInfo.getFileEncryptionInfo())); + } + } else { + LOG.warn("omKeyInfo not found. Key: " + dbMultipartOpenKey + + ". The upload id " + keyArgs.getMultipartUploadID() + " may be invalid."); } } finally { if (acquireLock) { @@ -769,6 +773,20 @@ protected OmKeyInfo prepareFileInfo( dbKeyInfo.setModificationTime(keyArgs.getModificationTime()); dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled); dbKeyInfo.setReplicationConfig(replicationConfig); + + // Construct a new metadata map from KeyArgs. + // Clear the old one when the key is overwritten. 
+ dbKeyInfo.getMetadata().clear(); + dbKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf( + keyArgs.getMetadataList())); + + // Construct a new tags from KeyArgs + // Clear the old one when the key is overwritten + dbKeyInfo.getTags().clear(); + dbKeyInfo.getTags().putAll(KeyValueUtil.getFromProtobuf( + keyArgs.getTagsList())); + + dbKeyInfo.setFileEncryptionInfo(encInfo); return dbKeyInfo; } @@ -809,7 +827,10 @@ protected OmKeyInfo createFileInfo( keyArgs, omBucketInfo, omPathInfo, prefixManager)) .addAllMetadata(KeyValueUtil.getFromProtobuf( keyArgs.getMetadataList())) + .addAllTags(KeyValueUtil.getFromProtobuf( + keyArgs.getTagsList())) .setUpdateID(transactionLogIndex) + .setOwnerName(keyArgs.getOwnerName()) .setFile(true); if (omPathInfo instanceof OMFileRequest.OMPathInfoWithFSO) { // FileTable metadata format @@ -1051,4 +1072,11 @@ protected void filterOutBlocksStillInUse(OmKeyInfo referenceKey, LOG.debug("After block filtering, keysToBeFiltered = {}", keysToBeFiltered); } + + protected void validateEncryptionKeyInfo(OmBucketInfo bucketInfo, KeyArgs keyArgs) throws OMException { + if (bucketInfo.getEncryptionKeyInfo() != null && !keyArgs.hasFileEncryptionInfo()) { + throw new OMException("Attempting to create unencrypted file " + + keyArgs.getKeyName() + " in encrypted bucket " + keyArgs.getBucketName(), INVALID_REQUEST); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java index 1f5e623da0d2..e14cfaaad281 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java @@ -74,10 +74,12 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .setKeyName(normalizedKeyPath) .build(); + OzoneManagerProtocolProtos.KeyArgs newKeyArgs = resolveBucketLink(ozoneManager, keyArgs); + return request.toBuilder() .setSetTimesRequest( setTimesRequest.toBuilder() - .setKeyArgs(keyArgs) + .setKeyArgs(newKeyArgs) .setMtime(getModificationTime())) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index be89da369cdb..61ed5ffb1c8b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -57,11 +57,13 @@ import java.io.IOException; import java.nio.file.InvalidPathException; import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import static org.apache.hadoop.ozone.OzoneConsts.BUCKET; +import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_KEYS_LIST; import static org.apache.hadoop.ozone.OzoneConsts.UNDELETED_KEYS_LIST; import static org.apache.hadoop.ozone.OzoneConsts.VOLUME; @@ -174,18 +176,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); - List dbOpenKeys = new ArrayList<>(); + Map openKeyInfoMap = new 
HashMap<>(); // Mark all keys which can be deleted, in cache as deleted. quotaReleased = markKeysAsDeletedInCache(ozoneManager, trxnLogIndex, omKeyInfoList, - dirList, omMetadataManager, quotaReleased, dbOpenKeys); + dirList, omMetadataManager, quotaReleased, openKeyInfoMap); omBucketInfo.incrUsedBytes(-quotaReleased); omBucketInfo.incrUsedNamespace(-1L * omKeyInfoList.size()); final long volumeId = omMetadataManager.getVolumeId(volumeName); omClientResponse = getOmClientResponse(ozoneManager, omKeyInfoList, dirList, omResponse, - unDeletedKeys, deleteStatus, omBucketInfo, volumeId, dbOpenKeys); + unDeletedKeys, deleteStatus, omBucketInfo, volumeId, openKeyInfoMap); result = Result.SUCCESS; @@ -260,7 +262,7 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager, List omKeyInfoList, List dirList, OMResponse.Builder omResponse, OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys, - boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId, List dbOpenKeys) { + boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId, Map openKeyInfoMap) { OMClientResponse omClientResponse; omClientResponse = new OMKeysDeleteResponse(omResponse .setDeleteKeysResponse( @@ -268,13 +270,13 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager, .setUnDeletedKeys(unDeletedKeys)) .setStatus(deleteStatus ? OK : PARTIAL_DELETE).setSuccess(deleteStatus) .build(), omKeyInfoList, ozoneManager.isRatisEnabled(), - omBucketInfo.copyObject(), dbOpenKeys); + omBucketInfo.copyObject(), openKeyInfoMap); return omClientResponse; } protected long markKeysAsDeletedInCache(OzoneManager ozoneManager, long trxnLogIndex, List omKeyInfoList, List dirList, - OMMetadataManager omMetadataManager, long quotaReleased, List dbOpenKeys) + OMMetadataManager omMetadataManager, long quotaReleased, Map openKeyInfoMap) throws IOException { for (OmKeyInfo omKeyInfo : omKeyInfoList) { String volumeName = omKeyInfo.getVolumeName(); @@ -294,10 +296,10 @@ protected long markKeysAsDeletedInCache(OzoneManager ozoneManager, String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, hsyncClientId); OmKeyInfo openKeyInfo = openKeyTable.get(dbOpenKey); if (openKeyInfo != null) { - // Remove the open key by putting a tombstone entry - openKeyTable.addCacheEntry(dbOpenKey, trxnLogIndex); - // Append to the list of open keys to be deleted. The list is not expected to be large. - dbOpenKeys.add(dbOpenKey); + openKeyInfo.getMetadata().put(DELETED_HSYNC_KEY, "true"); + openKeyTable.addCacheEntry(dbOpenKey, openKeyInfo, trxnLogIndex); + // Add to the map of open keys to be deleted. 
+ openKeyInfoMap.put(dbOpenKey, openKeyInfo); } else { LOG.warn("Potentially inconsistent DB state: open key not found with dbOpenKey '{}'", dbOpenKey); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java index b90fd15b267f..85d9c8ff4cb6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java @@ -37,7 +37,9 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; @@ -88,7 +90,7 @@ protected long markKeysAsDeletedInCache( OzoneManager ozoneManager, long trxnLogIndex, List omKeyInfoList, List dirList, OMMetadataManager omMetadataManager, - long quotaReleased, List dbOpenKeys) throws IOException { + long quotaReleased, Map openKeyInfoMap) throws IOException { // Mark all keys which can be deleted, in cache as deleted. for (OmKeyInfo omKeyInfo : omKeyInfoList) { @@ -113,10 +115,10 @@ protected long markKeysAsDeletedInCache( String dbOpenKey = omMetadataManager.getOpenFileName(volumeId, bucketId, parentId, fileName, hsyncClientId); OmKeyInfo openKeyInfo = openKeyTable.get(dbOpenKey); if (openKeyInfo != null) { - // Remove the open key by putting a tombstone entry - openKeyTable.addCacheEntry(dbOpenKey, trxnLogIndex); - // Append to the list of open keys to be deleted. The list is not expected to be large. - dbOpenKeys.add(dbOpenKey); + openKeyInfo.getMetadata().put(DELETED_HSYNC_KEY, "true"); + openKeyTable.addCacheEntry(dbOpenKey, openKeyInfo, trxnLogIndex); + // Add to the map of open keys to be deleted. + openKeyInfoMap.put(dbOpenKey, openKeyInfo); } else { LOG.warn("Potentially inconsistent DB state: open key not found with dbOpenKey '{}'", dbOpenKey); } @@ -146,7 +148,7 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager, List omKeyInfoList, List dirList, OzoneManagerProtocolProtos.OMResponse.Builder omResponse, OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys, - boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId, List dbOpenKeys) { + boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId, Map openKeyInfoMap) { OMClientResponse omClientResponse; omClientResponse = new OMKeysDeleteResponseWithFSO(omResponse .setDeleteKeysResponse( @@ -154,7 +156,7 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager, .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)) .setStatus(deleteStatus ? 
OK : PARTIAL_DELETE).setSuccess(deleteStatus) .build(), omKeyInfoList, dirList, ozoneManager.isRatisEnabled(), - omBucketInfo.copyObject(), volumeId, dbOpenKeys); + omBucketInfo.copyObject(), volumeId, openKeyInfoMap); return omClientResponse; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 345886c050b5..a8490b111524 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -33,9 +33,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.util.ObjectParser; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -66,9 +64,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); boolean lockAcquired = false; - String volume = null; - String bucket = null; - String key = null; + String prefixPath = null; + OzoneObj resolvedPrefixObj = null; OMPrefixAclOpResult operationResult = null; boolean opResult = false; Result result = null; @@ -76,20 +73,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn PrefixManagerImpl prefixManager = (PrefixManagerImpl) ozoneManager.getPrefixManager(); try { + resolvedPrefixObj = prefixManager.getResolvedPrefixObj(getOzoneObj()); prefixManager.validateOzoneObj(getOzoneObj()); - String prefixPath = getOzoneObj().getPath(); - validatePrefixPath(prefixPath); - ObjectParser objectParser = new ObjectParser(prefixPath, - OzoneManagerProtocolProtos.OzoneObj.ObjectType.PREFIX); - volume = objectParser.getVolume(); - bucket = objectParser.getBucket(); - key = objectParser.getKey(); + validatePrefixPath(resolvedPrefixObj.getPath()); + prefixPath = resolvedPrefixObj.getPath(); // check Acl if (ozoneManager.getAclsEnabled()) { checkAcls(ozoneManager, OzoneObj.ResourceType.PREFIX, OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, - volume, bucket, key); + resolvedPrefixObj.getVolumeName(), resolvedPrefixObj.getBucketName(), + resolvedPrefixObj.getPrefixName()); } mergeOmLockDetails(omMetadataManager.getLock() @@ -102,7 +96,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } try { - operationResult = apply(prefixManager, omPrefixInfo, trxnLogIndex); + operationResult = apply(resolvedPrefixObj, prefixManager, omPrefixInfo, trxnLogIndex); } catch (IOException ex) { // In HA case this will never happen. 
// As in add/remove/setAcl method we have logic to update database, @@ -145,16 +139,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } finally { if (lockAcquired) { mergeOmLockDetails(omMetadataManager.getLock() - .releaseWriteLock(PREFIX_LOCK, getOzoneObj().getPath())); + .releaseWriteLock(PREFIX_LOCK, prefixPath)); } if (omClientResponse != null) { omClientResponse.setOmLockDetails(getOmLockDetails()); } } - OzoneObj obj = getOzoneObj(); + OzoneObj obj = resolvedPrefixObj; + if (obj == null) { + // Fall back to the prefix under link bucket + obj = getOzoneObj(); + } + Map auditMap = obj.toAuditMap(); - onComplete(opResult, exception, ozoneManager.getMetrics(), result, + onComplete(obj, opResult, exception, ozoneManager.getMetrics(), result, trxnLogIndex, ozoneManager.getAuditLogger(), auditMap); return omClientResponse; @@ -168,24 +167,26 @@ private void validatePrefixPath(String prefixPath) throws OMException { } /** - * Get the path name from the request. - * @return path name + * Get the prefix ozone object passed in the request. + * Note: The ozone object might still refer to a prefix under a link bucket which + * might require to be resolved. + * @return Prefix ozone object. */ abstract OzoneObj getOzoneObj(); // TODO: Finer grain metrics can be moved to these callbacks. They can also // be abstracted into separate interfaces in future. /** - * Get the initial om response builder with lock. - * @return om response builder. + * Get the initial OM response builder with lock. + * @return OM response builder. */ abstract OMResponse.Builder onInit(); /** - * Get the om client response on success case with lock. - * @param omResponse - * @param omPrefixInfo - * @param operationResult + * Get the OM client response on success case with lock. + * @param omResponse OM response builder. + * @param omPrefixInfo The updated prefix info. + * @param operationResult The operation result. See {@link OMPrefixAclOpResult}. * @return OMClientResponse */ abstract OMClientResponse onSuccess( @@ -194,8 +195,8 @@ abstract OMClientResponse onSuccess( /** * Get the om client response on failure case with lock. - * @param omResponse - * @param exception + * @param omResponse OM response builder. + * @param exception Exception thrown while processing the request. * @return OMClientResponse */ abstract OMClientResponse onFailure(OMResponse.Builder omResponse, @@ -204,23 +205,28 @@ abstract OMClientResponse onFailure(OMResponse.Builder omResponse, /** * Completion hook for final processing before return without lock. * Usually used for logging without lock and metric update. - * @param operationResult - * @param exception - * @param omMetrics + * @param resolvedOzoneObj Resolved prefix object in case the prefix is under a link bucket. + * The original ozone object if the prefix is not under a link bucket. + * @param operationResult The operation result. See {@link OMPrefixAclOpResult}. + * @param exception Exception thrown while processing the request. + * @param omMetrics OM metrics used to update the relevant metrics. 
*/ - abstract void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, - AuditLogger auditLogger, Map auditMap); + @SuppressWarnings("checkstyle:ParameterNumber") + abstract void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, + AuditLogger auditLogger, Map auditMap); /** - * Apply the acl operation, if successfully completed returns true, - * else false. - * @param prefixManager - * @param omPrefixInfo - * @param trxnLogIndex - * @throws IOException + * Apply the acl operation to underlying storage (prefix tree and table cache). + * @param resolvedOzoneObj Resolved prefix object in case the prefix is under a link bucket. + * The original ozone object if the prefix is not under a link bucket. + * @param prefixManager Prefix manager used to update the underlying prefix storage. + * @param omPrefixInfo Previous prefix info, null if there is no existing prefix info. + * @param trxnLogIndex Transaction log index. + * @return result of the prefix operation, see {@link OMPrefixAclOpResult}. + * @throws IOException Exception thrown when updating the underlying prefix storage. */ - abstract OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + abstract OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java index fe75928795b6..c290b08939c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java @@ -19,10 +19,8 @@ package org.apache.hadoop.ozone.om.request.key.acl.prefix; import java.io.IOException; -import java.util.List; import java.util.Map; -import com.google.common.collect.Lists; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -55,8 +53,8 @@ public class OMPrefixAddAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixAddAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final OzoneAcl ozoneAcl; public OMPrefixAddAclRequest(OMRequest omRequest) { super(omRequest); @@ -65,8 +63,7 @@ public OMPrefixAddAclRequest(OMRequest omRequest) { // TODO: conversion of OzoneObj to protobuf can be avoided when we have // single code path for HA and Non-HA ozoneObj = OzoneObjInfo.fromProtobuf(addAclRequest.getObj()); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(addAclRequest.getAcl())); + ozoneAcl = OzoneAcl.fromProtobuf(addAclRequest.getAcl()); } @Override @@ -96,41 +93,41 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { if (operationResult) { - 
LOG.debug("Add acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + LOG.debug("Add acl: {} to path: {} success!", ozoneAcl, + resolvedOzoneObj.getPath()); } else { LOG.debug("Acl {} already exists in path {}", - ozoneAcls, ozoneObj.getPath()); + ozoneAcl, resolvedOzoneObj.getPath()); } } break; case FAILURE: - LOG.error("Add acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + LOG.error("Add acl {} to path {} failed!", ozoneAcl, + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixAddAclRequest: {}", getOmRequest()); } - if (ozoneAcls != null) { - auditMap.put(OzoneConsts.ACL, ozoneAcls.toString()); + if (ozoneAcl != null) { + auditMap.put(OzoneConsts.ACL, ozoneAcl.toString()); } auditLog(auditLogger, buildAuditMessage(OMAction.ADD_ACL, auditMap, exception, getOmRequest().getUserInfo())); } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.addAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo, + return prefixManager.addAcl(resolvedOzoneObj, ozoneAcl, omPrefixInfo, trxnLogIndex); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java index 67b704121676..7c2666944c57 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java @@ -45,15 +45,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; /** - * Handle add Acl request for prefix. + * Handle remove Acl request for prefix. 
*/ public class OMPrefixRemoveAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixRemoveAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final List ozoneAcls; public OMPrefixRemoveAclRequest(OMRequest omRequest) { super(omRequest); @@ -93,25 +93,24 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { if (operationResult) { LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + resolvedOzoneObj.getPath()); } else { LOG.debug("Acl {} not removed from path {} as it does not exist", - ozoneAcls, ozoneObj.getPath()); + ozoneAcls, resolvedOzoneObj.getPath()); } } break; case FAILURE: - omMetrics.incNumBucketUpdateFails(); LOG.error("Remove acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixRemoveAclRequest: {}", @@ -126,9 +125,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.removeAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo); + return prefixManager.removeAcl(resolvedOzoneObj, ozoneAcls.get(0), omPrefixInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java index 6e93e8ffe5e0..11fc0d150eea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java @@ -45,15 +45,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; /** - * Handle add Acl request for prefix. + * Handle set Acl request for prefix. 
*/ public class OMPrefixSetAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixSetAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final List ozoneAcls; public OMPrefixSetAclRequest(OMRequest omRequest) { super(omRequest); @@ -94,20 +94,19 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, - AuditLogger auditLogger, Map auditMap) { + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, + long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + resolvedOzoneObj.getPath()); } break; case FAILURE: - omMetrics.incNumBucketUpdateFails(); LOG.error("Set acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixSetAclRequest: {}", @@ -122,9 +121,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.setAcl(ozoneObj, ozoneAcls, omPrefixInfo, + return prefixManager.setAcl(resolvedOzoneObj, ozoneAcls, omPrefixInfo, trxnLogIndex); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java index c4fe3f7ab99c..109e63ce7cc3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -158,6 +159,8 @@ private void auditAbortedMPUs(OzoneManager ozoneManager, .getUploadID()) .build(); Map auditMap = buildKeyArgsAuditMap(keyArgsForAudit); + auditMap.put(OzoneConsts.UPLOAD_ID, abortInfo.getOmMultipartKeyInfo() + .getUploadID()); auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( OMAction.ABORT_EXPIRED_MULTIPART_UPLOAD, auditMap, null, getOmRequest().getUserInfo())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index e1772d4009c5..f16ef9f8f423 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ 
-20,6 +20,8 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -120,6 +122,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn Preconditions.checkNotNull(keyArgs.getMultipartUploadID()); Map auditMap = buildKeyArgsAuditMap(keyArgs); + auditMap.put(OzoneConsts.UPLOAD_ID, keyArgs.getMultipartUploadID()); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -211,6 +214,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setUpdateID(transactionLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null) + .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList())) + .setOwnerName(keyArgs.getOwnerName()) + .addAllTags(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList())) .build(); // Add to cache diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index d1c865fbc7fe..de78c6651109 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -20,6 +20,8 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneConfigUtil; @@ -78,6 +80,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn Preconditions.checkNotNull(keyArgs.getMultipartUploadID()); Map auditMap = buildKeyArgsAuditMap(keyArgs); + auditMap.put(OzoneConsts.UPLOAD_ID, keyArgs.getMultipartUploadID()); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -175,6 +178,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyArgs.getKeyName()) + .setOwnerName(keyArgs.getOwnerName()) .setCreationTime(keyArgs.getModificationTime()) .setModificationTime(keyArgs.getModificationTime()) .setReplicationConfig(replicationConfig) @@ -187,6 +191,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? 
OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null) .setParentObjectID(pathInfoFSO.getLastKnownParentId()) + .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList())) + .addAllTags(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList())) .build(); // validate and update namespace for missing parent directory @@ -202,7 +208,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn missingParentInfos, null); OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, - multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(), + multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(), keyName, transactionLogIndex); // Add to cache diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index c7a7245533e1..400291d7a448 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -22,6 +22,7 @@ import java.nio.file.InvalidPathException; import java.util.Map; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -107,6 +108,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OzoneManagerProtocolProtos.KeyArgs keyArgs = multipartUploadAbortRequest .getKeyArgs(); Map auditMap = buildKeyArgsAuditMap(keyArgs); + auditMap.put(OzoneConsts.UPLOAD_ID, keyArgs.getMultipartUploadID()); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index f461bbd1719a..1123b8dba7a6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -112,6 +112,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs(); Map auditMap = buildKeyArgsAuditMap(keyArgs); + auditMap.put(OzoneConsts.UPLOAD_ID, keyArgs.getMultipartUploadID()); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -238,9 +239,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn correctedSpace); omBucketInfo.incrUsedBytes(correctedSpace); - omResponse.setCommitMultiPartUploadResponse( - MultipartCommitUploadPartResponse.newBuilder() - .setPartName(partName)); + MultipartCommitUploadPartResponse.Builder commitResponseBuilder = MultipartCommitUploadPartResponse.newBuilder() + .setPartName(partName); + String eTag = omKeyInfo.getMetadata().get(OzoneConsts.ETAG); + if (eTag != null) { + commitResponseBuilder.setETag(eTag); + } + omResponse.setCommitMultiPartUploadResponse(commitResponseBuilder); omClientResponse = 
getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey, omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 99c98e3b48b2..71ed8318399a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -23,12 +23,19 @@ import java.io.IOException; import java.nio.file.InvalidPathException; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.om.OzoneConfigUtil; +import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -39,6 +46,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -80,6 +88,32 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class); + private BiFunction eTagBasedValidator = + (part, partKeyInfo) -> { + String eTag = part.getETag(); + AtomicReference dbPartETag = new AtomicReference<>(); + String dbPartName = null; + if (partKeyInfo != null) { + partKeyInfo.getPartKeyInfo().getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().ifPresent(kv -> dbPartETag.set(kv.getValue())); + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null : + dbPartETag.get(), StringUtils.equals(eTag, dbPartETag.get()) || StringUtils.equals(eTag, dbPartName)); + }; + private BiFunction partNameBasedValidator = + (part, partKeyInfo) -> { + String partName = part.getPartName(); + String dbPartName = null; + if (partKeyInfo != null) { + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(partName, partKeyInfo == null ? 
null : + dbPartName, StringUtils.equals(partName, dbPartName)); + }; + public S3MultipartUploadCompleteRequest(OMRequest omRequest, BucketLayout bucketLayout) { super(omRequest, bucketLayout); @@ -117,6 +151,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn List partsList = multipartUploadCompleteRequest.getPartsListList(); Map auditMap = buildKeyArgsAuditMap(keyArgs); + auditMap.put(OzoneConsts.UPLOAD_ID, keyArgs.getMultipartUploadID()); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -148,11 +183,72 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); - String ozoneKey = omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName); - - String dbOzoneKey = - getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName); + List missingParentInfos; + OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest + .verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName, + keyName, Paths.get(keyName)); + missingParentInfos = OMDirectoryCreateRequestWithFSO + .getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo, + pathInfoFSO, trxnLogIndex); + + if (missingParentInfos != null) { + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, + bucketName); + + // add all missing parents to directory table + addMissingParentsToCache(omBucketInfo, missingParentInfos, + omMetadataManager, volumeId, bucketId, trxnLogIndex); + + String multipartOpenKey = omMetadataManager + .getMultipartKey(volumeId, bucketId, + pathInfoFSO.getLastKnownParentId(), + pathInfoFSO.getLeafNodeName(), + keyArgs.getMultipartUploadID()); + + if (getOmKeyInfoFromOpenKeyTable(multipartOpenKey, + keyName, omMetadataManager) == null) { + + final ReplicationConfig replicationConfig = OzoneConfigUtil + .resolveReplicationConfigPreference(keyArgs.getType(), + keyArgs.getFactor(), keyArgs.getEcReplicationConfig(), + omBucketInfo != null ? + omBucketInfo.getDefaultReplicationConfig() : + null, ozoneManager); + + OmMultipartKeyInfo multipartKeyInfoFromArgs = + new OmMultipartKeyInfo.Builder() + .setUploadID(keyArgs.getMultipartUploadID()) + .setCreationTime(keyArgs.getModificationTime()) + .setReplicationConfig(replicationConfig) + .setObjectID(pathInfoFSO.getLeafNodeObjectId()) + .setUpdateID(trxnLogIndex) + .setParentID(pathInfoFSO.getLastKnownParentId()) + .build(); + + OmKeyInfo keyInfoFromArgs = new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setReplicationConfig(replicationConfig) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) + .setAcls(getAclsForKey(keyArgs, omBucketInfo, pathInfoFSO, + ozoneManager.getPrefixManager())) + .setObjectID(pathInfoFSO.getLeafNodeObjectId()) + .setUpdateID(trxnLogIndex) + .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? 
+ OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null) + .setParentObjectID(pathInfoFSO.getLastKnownParentId()) + .build(); + + // Add missing multi part info to open key table + addMultiPartToCache(omMetadataManager, multipartOpenKey, + pathInfoFSO, keyInfoFromArgs, keyName, trxnLogIndex); + } + } String dbMultipartOpenKey = getDBMultipartOpenKey(volumeName, bucketName, keyName, uploadID, @@ -161,6 +257,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OmMultipartKeyInfo multipartKeyInfo = omMetadataManager .getMultipartInfoTable().get(multipartKey); + String ozoneKey = omMetadataManager.getOzoneKey( + volumeName, bucketName, keyName); + + String dbOzoneKey = + getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName); + // Check for directory exists with same name for the LEGACY_FS, // if it exists throw error. checkDirectoryAlreadyExists(ozoneManager, omBucketInfo, keyName, @@ -249,14 +351,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setVolume(requestedVolume) .setBucket(requestedBucket) .setKey(keyName) - .setHash(omKeyInfo.getMetadata().get("ETag"))); + .setHash(omKeyInfo.getMetadata().get(OzoneConsts.ETAG))); long volumeId = omMetadataManager.getVolumeId(volumeName); long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); omClientResponse = getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove, omBucketInfo, - volumeId, bucketId); + volumeId, bucketId, missingParentInfos, multipartKeyInfo); result = Result.SUCCESS; } else { @@ -297,7 +399,8 @@ protected OMClientResponse getOmClientResponse(String multipartKey, OMResponse.Builder omResponse, String dbMultipartOpenKey, OmKeyInfo omKeyInfo, List allKeyInfoToRemove, OmBucketInfo omBucketInfo, - long volumeId, long bucketId) { + long volumeId, long bucketId, List missingParentInfos, + OmMultipartKeyInfo multipartKeyInfo) { return new S3MultipartUploadCompleteResponse(omResponse.build(), multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove, @@ -389,8 +492,11 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) .setAcls(dbOpenKeyInfo.getAcls()) - .addMetadata("ETag", - multipartUploadedKeyHash(partKeyInfoMap)); + .addAllMetadata(dbOpenKeyInfo.getMetadata()) + .addMetadata(OzoneConsts.ETAG, + multipartUploadedKeyHash(partKeyInfoMap)) + .setOwnerName(keyArgs.getOwnerName()) + .addAllTags(dbOpenKeyInfo.getTags()); // Check if db entry has ObjectID. This check is required because // it is possible that between multipart key uploads and complete, // we had an upgrade. 
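[Editor's note — illustrative sketch, not part of the patch.] The hunks above change part validation in S3MultipartUploadCompleteRequest so that it prefers ETag comparison when every part in the request carries an ETag, and falls back to the legacy part-name comparison otherwise. The standalone Java sketch below shows that selection logic in isolation, assuming simplified, hypothetical RequestPart and StoredPart stand-ins for the protobuf Part message and PartKeyInfo; it is not the OM code itself.

import java.util.List;
import java.util.Objects;

public final class PartValidationSketch {

  /** What the client sends for each part in the complete-MPU request (hypothetical type). */
  static final class RequestPart {
    final int partNumber;
    final String partName;
    final String eTag; // may be null for clients that do not send ETags

    RequestPart(int partNumber, String partName, String eTag) {
      this.partNumber = partNumber;
      this.partName = partName;
      this.eTag = eTag;
    }
  }

  /** What OM recorded when the part was committed (hypothetical type). */
  static final class StoredPart {
    final String partName;
    final String eTag;

    StoredPart(String partName, String eTag) {
      this.partName = partName;
      this.eTag = eTag;
    }
  }

  /**
   * Validate by ETag only when every requested part carries one; otherwise fall back
   * to the legacy part-name comparison, mirroring the eTagBasedValidationAvailable check.
   */
  static boolean partsMatch(List<RequestPart> requested, List<StoredPart> stored) {
    boolean eTagAvailable = requested.stream().allMatch(p -> p.eTag != null);
    for (RequestPart part : requested) {
      // Parts are indexed by 1-based part number in this simplified model.
      StoredPart db = part.partNumber >= 1 && part.partNumber <= stored.size()
          ? stored.get(part.partNumber - 1) : null;
      if (db == null) {
        return false; // part number unknown to OM
      }
      boolean valid = eTagAvailable
          // ETag match, or ETag equal to the stored part name for mixed/upgraded histories.
          ? Objects.equals(part.eTag, db.eTag) || Objects.equals(part.eTag, db.partName)
          : Objects.equals(part.partName, db.partName);
      if (!valid) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    List<StoredPart> stored = List.of(new StoredPart("key/part-1", "etag-1"));
    System.out.println(partsMatch(List.of(new RequestPart(1, "ignored", "etag-1")), stored));   // true
    System.out.println(partsMatch(List.of(new RequestPart(1, "key/part-1", null)), stored));    // true
    System.out.println(partsMatch(List.of(new RequestPart(1, "key/part-1", "wrong")), stored)); // false
  }
}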
@@ -419,8 +525,14 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, omKeyInfo.setModificationTime(keyArgs.getModificationTime()); omKeyInfo.setDataSize(dataSize); omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig()); - omKeyInfo.getMetadata().put("ETag", + if (dbOpenKeyInfo.getMetadata() != null) { + omKeyInfo.setMetadata(dbOpenKeyInfo.getMetadata()); + } + omKeyInfo.getMetadata().put(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)); + if (dbOpenKeyInfo.getTags() != null) { + omKeyInfo.setTags(dbOpenKeyInfo.getTags()); + } } omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); return omKeyInfo; @@ -436,6 +548,22 @@ protected String getDBOzoneKey(OMMetadataManager omMetadataManager, return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); } + protected void addMissingParentsToCache(OmBucketInfo omBucketInfo, + List missingParentInfos, + OMMetadataManager omMetadataManager, + long volumeId, long bucketId, long transactionLogIndex + ) throws IOException { + // FSO is disabled. Do nothing. + } + + protected void addMultiPartToCache( + OMMetadataManager omMetadataManager, String multipartOpenKey, + OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo, + String keyName, long transactionLogIndex + ) throws IOException { + // FSO is disabled. Do nothing. + } + protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey, String keyName, OMMetadataManager omMetadataManager) throws IOException { return omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); @@ -491,24 +619,19 @@ private long getMultipartDataSize(String requestedVolume, OzoneManager ozoneManager) throws OMException { long dataSize = 0; int currentPartCount = 0; + boolean eTagBasedValidationAvailable = partsList.stream().allMatch(OzoneManagerProtocolProtos.Part::hasETag); // Now do actual logic, and check for any Invalid part during this. for (OzoneManagerProtocolProtos.Part part : partsList) { currentPartCount++; int partNumber = part.getPartNumber(); - String partName = part.getPartName(); - PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber); - - String dbPartName = null; - if (partKeyInfo != null) { - dbPartName = partKeyInfo.getPartName(); - } - if (!StringUtils.equals(partName, dbPartName)) { - String omPartName = partKeyInfo == null ? null : dbPartName; + MultipartCommitRequestPart requestPart = eTagBasedValidationAvailable ? + eTagBasedValidator.apply(part, partKeyInfo) : partNameBasedValidator.apply(part, partKeyInfo); + if (!requestPart.isValid()) { throw new OMException( failureMessage(requestedVolume, requestedBucket, keyName) + - ". Provided Part info is { " + partName + ", " + partNumber + - "}, whereas OM has partName " + omPartName, + ". 
Provided Part info is { " + requestPart.getRequestPartId() + ", " + partNumber + + "}, whereas OM has eTag " + requestPart.getOmPartId(), OMException.ResultCodes.INVALID_PART); } @@ -641,11 +764,41 @@ private String multipartUploadedKeyHash( OmMultipartKeyInfo.PartKeyInfoMap partsList) { StringBuffer keysConcatenated = new StringBuffer(); for (PartKeyInfo partKeyInfo: partsList) { - keysConcatenated.append(KeyValueUtil.getFromProtobuf(partKeyInfo - .getPartKeyInfo().getMetadataList()).get("ETag")); + String partPropertyToComputeHash = KeyValueUtil.getFromProtobuf(partKeyInfo.getPartKeyInfo().getMetadataList()) + .get(OzoneConsts.ETAG); + if (partPropertyToComputeHash == null) { + partPropertyToComputeHash = partKeyInfo.getPartName(); + } + keysConcatenated.append(partPropertyToComputeHash); } return DigestUtils.md5Hex(keysConcatenated.toString()) + "-" + partsList.size(); } + private static class MultipartCommitRequestPart { + private String requestPartId; + + private String omPartId; + + private boolean isValid; + + MultipartCommitRequestPart(String requestPartId, String omPartId, boolean isValid) { + this.requestPartId = requestPartId; + this.omPartId = omPartId; + this.isValid = isValid; + } + + public String getRequestPartId() { + return requestPartId; + } + + public String getOmPartId() { + return omPartId; + } + + public boolean isValid() { + return isValid; + } + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java index c224786b108a..a70523381904 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java @@ -23,7 +23,9 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse; @@ -74,6 +76,37 @@ protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager, } } + @Override + protected void addMissingParentsToCache(OmBucketInfo omBucketInfo, + List missingParentInfos, + OMMetadataManager omMetadataManager, long volumeId, long bucketId, + long transactionLogIndex) throws IOException { + + // validate and update namespace for missing parent directory. + checkBucketQuotaInNamespace(omBucketInfo, missingParentInfos.size()); + omBucketInfo.incrUsedNamespace(missingParentInfos.size()); + + // Add cache entries for the missing parent directories. 
+ OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, + volumeId, bucketId, transactionLogIndex, + missingParentInfos, null); + } + + @Override + protected void addMultiPartToCache( + OMMetadataManager omMetadataManager, String multipartOpenKey, + OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo, + String keyName, long transactionLogIndex + ) throws IOException { + + // Add multi part to cache + OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, + multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(), + keyName, transactionLogIndex); + + } + + @Override protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneFileKey, String keyName, OMMetadataManager omMetadataManager) throws IOException { @@ -147,11 +180,13 @@ protected OMClientResponse getOmClientResponse(String multipartKey, OzoneManagerProtocolProtos.OMResponse.Builder omResponse, String dbMultipartOpenKey, OmKeyInfo omKeyInfo, List allKeyInfoToRemove, OmBucketInfo omBucketInfo, - long volumeId, long bucketId) { + long volumeId, long bucketId, List missingParentInfos, + OmMultipartKeyInfo multipartKeyInfo) { return new S3MultipartUploadCompleteResponseWithFSO(omResponse.build(), multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove, - getBucketLayout(), omBucketInfo, volumeId, bucketId); + getBucketLayout(), omBucketInfo, volumeId, bucketId, + missingParentInfos, multipartKeyInfo); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index b7dba8260269..29c7628e3cca 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -19,6 +19,10 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -35,27 +39,38 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Set; import java.util.UUID; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; + /** * Handles OMSnapshotPurge Request. * This is an OM internal request. Does not need @RequireSnapshotFeatureState. 
*/ public class OMSnapshotPurgeRequest extends OMClientRequest { + private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotPurgeRequest.class); + public OMSnapshotPurgeRequest(OMRequest omRequest) { super(omRequest); } @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); + final long trxnLogIndex = termIndex.getIndex(); + OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); @@ -76,50 +91,118 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn Map updatedPathPreviousAndGlobalSnapshots = new HashMap<>(); - // Snapshots that are purged by the SnapshotDeletingService - // will update the next snapshot so that is can be deep cleaned - // by the KeyDeletingService in the next run. + // Each snapshot purge operation does three things: + // 1. Update the snapshot chain, + // 2. Update the deep clean flag for the next active snapshot (so that it can be + // deep cleaned by the KeyDeletingService in the next run), + // 3. Finally, purge the snapshot. + // All of these steps have to be performed only after acquiring all the necessary + // locks (lock on the snapshot to be purged, lock on the next active snapshot, and + // lock on the next path and global previous snapshots). Ideally, there is no need + // for locks for snapshot purge, as it could rely on OMStateMachine because OMStateMachine + // is going to process each request sequentially. + // + // But there is a problem with that. After filtering unnecessary SST files for a snapshot, + // SstFilteringService updates that snapshot's SstFilter flag. SstFilteringService cannot + // use the SetSnapshotProperty API because it runs on each OM independently, and one OM does + // not know if the snapshot has been filtered on the other OM in an HA environment. + // + // If locks are not taken, snapshot purge and SstFilteringService will cause a race condition + // and one's update will override the other's. for (String snapTableKey : snapshotDbKeys) { - SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable() - .get(snapTableKey); - - SnapshotInfo nextSnapshot = SnapshotUtils - .getNextActiveSnapshot(fromSnapshot, - snapshotChainManager, omSnapshotManager); - - updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, - trxnLogIndex, updatedSnapInfos); - updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, - trxnLogIndex, updatedPathPreviousAndGlobalSnapshots); - ozoneManager.getOmSnapshotManager().getSnapshotCache() - .invalidate(snapTableKey); + // To acquire all the locks, a set is maintained which is keyed by snapshotTableKey. + // snapshotTableKey is nothing but /volumeName/bucketName/snapshotName. + // Once all the locks are acquired, it performs the three steps mentioned above and + // releases all the locks after that. + Set> lockSet = new HashSet<>(4, 1); + try { + if (omMetadataManager.getSnapshotInfoTable().get(snapTableKey) == null) { + // Snapshot may have been purged in the previous iteration of SnapshotDeletingService.
+ LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + + "Snapshot purge request.", snapTableKey); + continue; + } + + acquireLock(lockSet, snapTableKey, omMetadataManager); + SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable().get(snapTableKey); + + SnapshotInfo nextSnapshot = + SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); + + if (nextSnapshot != null) { + acquireLock(lockSet, nextSnapshot.getTableKey(), omMetadataManager); + } + + // Update the chain first so that it has all the necessary locks before updating deep clean. + updateSnapshotChainAndCache(lockSet, omMetadataManager, fromSnapshot, trxnLogIndex, + updatedPathPreviousAndGlobalSnapshots); + updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex, updatedSnapInfos); + // Remove and close snapshot's RocksDB instance from SnapshotCache. + omSnapshotManager.invalidateCacheEntry(fromSnapshot.getSnapshotId()); + // Update SnapshotInfoTable cache. + omMetadataManager.getSnapshotInfoTable() + .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); + } finally { + for (Triple lockKey: lockSet) { + omMetadataManager.getLock() + .releaseWriteLock(SNAPSHOT_LOCK, lockKey.getLeft(), lockKey.getMiddle(), lockKey.getRight()); + } + } } omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapInfos, updatedPathPreviousAndGlobalSnapshots); + + omMetrics.incNumSnapshotPurges(); + LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating deep clean flags for " + + "snapshots: {} and global and previous for snapshots:{}.", + snapshotPurgeRequest, updatedSnapInfos.keySet(), updatedPathPreviousAndGlobalSnapshots.keySet()); } catch (IOException ex) { omClientResponse = new OMSnapshotPurgeResponse( createErrorOMResponse(omResponse, ex)); + omMetrics.incNumSnapshotPurgeFails(); + LOG.error("Failed to execute snapshotPurgeRequest:{{}}.", snapshotPurgeRequest, ex); } return omClientResponse; } + private void acquireLock(Set> lockSet, String snapshotTableKey, + OMMetadataManager omMetadataManager) throws IOException { + SnapshotInfo snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + + // It should not be the case that lock is required for non-existing snapshot. + if (snapshotInfo == null) { + LOG.error("Snapshot: '{}' doesn't not exist in snapshot table.", snapshotTableKey); + throw new OMException("Snapshot: '{" + snapshotTableKey + "}' doesn't not exist in snapshot table.", + OMException.ResultCodes.FILE_NOT_FOUND); + } + Triple lockKey = Triple.of(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName()); + if (!lockSet.contains(lockKey)) { + mergeOmLockDetails(omMetadataManager.getLock() + .acquireWriteLock(SNAPSHOT_LOCK, lockKey.getLeft(), lockKey.getMiddle(), lockKey.getRight())); + lockSet.add(lockKey); + } + } + private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, - Map updatedSnapInfos) { + Map updatedSnapInfos) throws IOException { if (snapInfo != null) { + // Fetch the latest value again after acquiring lock. + SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapInfo.getTableKey()); + // Setting next snapshot deep clean to false, Since the // current snapshot is deleted. We can potentially // reclaim more keys in the next snapshot. 
- snapInfo.setDeepClean(false); + updatedSnapshotInfo.setDeepClean(false); // Update table cache first - omMetadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(snapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, snapInfo)); - updatedSnapInfos.put(snapInfo.getTableKey(), snapInfo); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(updatedSnapshotInfo.getTableKey()), + CacheValue.get(trxnLogIndex, updatedSnapshotInfo)); + updatedSnapInfos.put(updatedSnapshotInfo.getTableKey(), updatedSnapshotInfo); } } @@ -130,6 +213,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, * update in DB. */ private void updateSnapshotChainAndCache( + Set> lockSet, OmMetadataManagerImpl metadataManager, SnapshotInfo snapInfo, long trxnLogIndex, @@ -141,7 +225,6 @@ private void updateSnapshotChainAndCache( SnapshotChainManager snapshotChainManager = metadataManager .getSnapshotChainManager(); - SnapshotInfo nextPathSnapInfo = null; // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot @@ -157,58 +240,63 @@ private void updateSnapshotChainAndCache( return; } - // Updates next path snapshot's previous snapshot ID + String nextPathSnapshotKey = null; + if (hasNextPathSnapshot) { UUID nextPathSnapshotId = snapshotChainManager.nextPathSnapshot( snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); - - String snapshotTableKey = snapshotChainManager + nextPathSnapshotKey = snapshotChainManager .getTableKey(nextPathSnapshotId); - nextPathSnapInfo = metadataManager.getSnapshotInfoTable() - .get(snapshotTableKey); - if (nextPathSnapInfo != null) { - nextPathSnapInfo.setPathPreviousSnapshotId( - snapInfo.getPathPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } + + // Acquire lock from the snapshot + acquireLock(lockSet, nextPathSnapshotKey, metadataManager); } - // Updates next global snapshot's previous snapshot ID + String nextGlobalSnapshotKey = null; if (hasNextGlobalSnapshot) { - UUID nextGlobalSnapshotId = - snapshotChainManager.nextGlobalSnapshot(snapInfo.getSnapshotId()); - - String snapshotTableKey = snapshotChainManager - .getTableKey(nextGlobalSnapshotId); - - SnapshotInfo nextGlobalSnapInfo = metadataManager.getSnapshotInfoTable() - .get(snapshotTableKey); - // If both next global and path snapshot are same, it may overwrite - // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check - // will prevent it. 
- if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && - nextGlobalSnapInfo.getSnapshotId().equals( - nextPathSnapInfo.getSnapshotId())) { - nextPathSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } else if (nextGlobalSnapInfo != null) { - nextGlobalSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextGlobalSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); - } + UUID nextGlobalSnapshotId = snapshotChainManager.nextGlobalSnapshot(snapInfo.getSnapshotId()); + nextGlobalSnapshotKey = snapshotChainManager.getTableKey(nextGlobalSnapshotId); + + // Acquire lock from the snapshot + acquireLock(lockSet, nextGlobalSnapshotKey, metadataManager); + } + + SnapshotInfo nextPathSnapInfo = + nextPathSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextPathSnapshotKey) : null; + + SnapshotInfo nextGlobalSnapInfo = + nextGlobalSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextGlobalSnapshotKey) : null; + + // Updates next path snapshot's previous snapshot ID + if (nextPathSnapInfo != null) { + nextPathSnapInfo.setPathPreviousSnapshotId(snapInfo.getPathPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } + + // Updates next global snapshot's previous snapshot ID + // If both next global and path snapshot are same, it may overwrite + // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check + // will prevent it. 
+ if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && + nextGlobalSnapInfo.getSnapshotId().equals(nextPathSnapInfo.getSnapshotId())) { + nextPathSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } else if (nextGlobalSnapInfo != null) { + nextGlobalSnapInfo.setGlobalPreviousSnapshotId( + snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextGlobalSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); } snapshotChainManager.deleteSnapshot(snapInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java new file mode 100644 index 000000000000..9f1875f65d89 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -0,0 +1,230 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; + +import java.io.IOException; +import java.nio.file.InvalidPathException; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotRenameResponse; +import org.apache.hadoop.ozone.om.snapshot.RequireSnapshotFeatureState; +import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameSnapshotRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Time; +import org.apache.ratis.server.protocol.TermIndex; + +/** + * Changes snapshot name. + */ +public class OMSnapshotRenameRequest extends OMClientRequest { + + public OMSnapshotRenameRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) + @RequireSnapshotFeatureState(true) + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + final OMRequest omRequest = super.preExecute(ozoneManager); + + final RenameSnapshotRequest renameSnapshotRequest = + omRequest.getRenameSnapshotRequest(); + + final String snapshotNewName = renameSnapshotRequest.getSnapshotNewName(); + + OmUtils.validateSnapshotName(snapshotNewName); + + String volumeName = renameSnapshotRequest.getVolumeName(); + String bucketName = renameSnapshotRequest.getBucketName(); + + // Permission check + UserGroupInformation ugi = createUGIForApi(); + String bucketOwner = ozoneManager.getBucketOwner(volumeName, bucketName, + IAccessAuthorizer.ACLType.READ, OzoneObj.ResourceType.BUCKET); + if (!ozoneManager.isAdmin(ugi) && + !ozoneManager.isOwner(ugi, bucketOwner)) { + throw new OMException( + "Only bucket owners and Ozone admins can rename snapshots", + OMException.ResultCodes.PERMISSION_DENIED); + } + + // Set rename time here so OM leader and follower would have the + // exact same timestamp. 
+ OMRequest.Builder omRequestBuilder = omRequest.toBuilder() + .setRenameSnapshotRequest( + RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotNewName(snapshotNewName) + .setSnapshotOldName(renameSnapshotRequest.getSnapshotOldName()) + .setRenameTime(Time.now())); + + return omRequestBuilder.build(); + } + + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + TermIndex termIndex) { + boolean acquiredBucketLock = false; + boolean acquiredSnapshotOldLock = false; + boolean acquiredSnapshotNewLock = false; + Exception exception = null; + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) + ozoneManager.getMetadataManager(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + OMClientResponse omClientResponse = null; + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + + UserInfo userInfo = getOmRequest().getUserInfo(); + + final RenameSnapshotRequest request = + getOmRequest().getRenameSnapshotRequest(); + + final String volumeName = request.getVolumeName(); + final String bucketName = request.getBucketName(); + final String snapshotNewName = request.getSnapshotNewName(); + final String snapshotOldName = request.getSnapshotOldName(); + + SnapshotInfo snapshotOldInfo = null; + + try { + // Acquire bucket lock + mergeOmLockDetails( + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName)); + acquiredBucketLock = getOmLockDetails().isLockAcquired(); + + mergeOmLockDetails(omMetadataManager.getLock().acquireWriteLock(SNAPSHOT_LOCK, + volumeName, bucketName, snapshotOldName)); + acquiredSnapshotOldLock = getOmLockDetails().isLockAcquired(); + + mergeOmLockDetails(omMetadataManager.getLock().acquireWriteLock(SNAPSHOT_LOCK, + volumeName, bucketName, snapshotNewName)); + acquiredSnapshotNewLock = getOmLockDetails().isLockAcquired(); + + // Check if a snapshot with the new name already exists + String snapshotNewTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotNewName); + + if (omMetadataManager.getSnapshotInfoTable().isExist(snapshotNewTableKey)) { + throw new OMException("Snapshot with name " + snapshotNewName + " already exists", + FILE_ALREADY_EXISTS); + } + + // Retrieve SnapshotInfo from the table + String snapshotOldTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, + snapshotOldName); + snapshotOldInfo = + omMetadataManager.getSnapshotInfoTable().get(snapshotOldTableKey); + + if (snapshotOldInfo == null) { + // Snapshot does not exist + throw new OMException("Snapshot with name " + snapshotOldName + " does not exist", + FILE_NOT_FOUND); + } + + switch (snapshotOldInfo.getSnapshotStatus()) { + case SNAPSHOT_DELETED: + throw new OMException("Snapshot is already deleted. 
" + + "Pending reclamation.", FILE_NOT_FOUND); + case SNAPSHOT_ACTIVE: + break; + default: + // Unknown snapshot non-active state + throw new OMException("Snapshot exists but no longer in active state", + FILE_NOT_FOUND); + } + + snapshotOldInfo.setName(snapshotNewName); + + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(snapshotOldTableKey), + CacheValue.get(termIndex.getIndex())); + + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(snapshotNewTableKey), + CacheValue.get(termIndex.getIndex(), snapshotOldInfo)); + + omMetadataManager.getSnapshotChainManager().updateSnapshot(snapshotOldInfo); + + omResponse.setRenameSnapshotResponse( + OzoneManagerProtocolProtos.RenameSnapshotResponse.newBuilder() + .setSnapshotInfo(snapshotOldInfo.getProtobuf())); + omClientResponse = new OMSnapshotRenameResponse( + omResponse.build(), snapshotOldTableKey, snapshotNewTableKey, snapshotOldInfo); + + } catch (IOException | InvalidPathException ex) { + exception = ex; + omClientResponse = new OMSnapshotRenameResponse( + createErrorOMResponse(omResponse, exception)); + } finally { + if (acquiredSnapshotNewLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_LOCK, volumeName, + bucketName, snapshotNewName)); + } + if (acquiredSnapshotOldLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_LOCK, volumeName, + bucketName, snapshotOldName)); + } + if (acquiredBucketLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + if (snapshotOldInfo == null) { + // Dummy SnapshotInfo for logging and audit logging when erred + snapshotOldInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotOldName, null, Time.now()); + } + + // Perform audit logging outside the lock + auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_SNAPSHOT, + snapshotOldInfo.toAuditMap(), exception, userInfo)); + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index b3dd5206c993..c4ca3dc99e3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -36,7 +37,8 @@ import java.io.IOException; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; /** * Updates the exclusive size of the snapshot. 
@@ -51,6 +53,7 @@ public OMSnapshotSetPropertyRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); OMClientResponse omClientResponse = null; OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); @@ -62,16 +65,31 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .getSetSnapshotPropertyRequest(); SnapshotInfo updatedSnapInfo = null; + String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + boolean acquiredSnapshotLock = false; + String volumeName = null; + String bucketName = null; + String snapshotName = null; + try { - String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable().get(snapshotKey); + if (snapshotInfo == null) { + LOG.error("Snapshot: '{}' does not exist in snapshot table.", snapshotKey); + throw new OMException("Snapshot: '{" + snapshotKey + "}' does not exist in snapshot table.", FILE_NOT_FOUND); + } + + volumeName = snapshotInfo.getVolumeName(); + bucketName = snapshotInfo.getBucketName(); + snapshotName = snapshotInfo.getName(); + + mergeOmLockDetails(metadataManager.getLock() + .acquireWriteLock(SNAPSHOT_LOCK, volumeName, bucketName, snapshotName)); + + acquiredSnapshotLock = getOmLockDetails().isLockAcquired(); + updatedSnapInfo = metadataManager.getSnapshotInfoTable() .get(snapshotKey); - if (updatedSnapInfo == null) { - LOG.error("SnapshotInfo for Snapshot: {} is not found", snapshotKey); - throw new OMException("SnapshotInfo for Snapshot: " + snapshotKey + - " is not found", INVALID_SNAPSHOT_ERROR); - } if (setSnapshotPropertyRequest.hasDeepCleanedDeletedDir()) { updatedSnapInfo.setDeepCleanedDeletedDir(setSnapshotPropertyRequest @@ -101,9 +119,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn CacheValue.get(termIndex.getIndex(), updatedSnapInfo)); omClientResponse = new OMSnapshotSetPropertyResponse( omResponse.build(), updatedSnapInfo); + omMetrics.incNumSnapshotSetProperties(); + LOG.info("Successfully executed snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest); } catch (IOException ex) { omClientResponse = new OMSnapshotSetPropertyResponse( createErrorOMResponse(omResponse, ex)); + omMetrics.incNumSnapshotSetPropertyFails(); + LOG.error("Failed to execute snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest, ex); + } finally { + if (acquiredSnapshotLock) { + mergeOmLockDetails(metadataManager.getLock() + .releaseWriteLock(SNAPSHOT_LOCK, volumeName, bucketName, snapshotName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } } return omClientResponse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java index aba8f6f6fc0d..e15782acafd6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java @@ -20,7 +20,7 @@ import com.google.protobuf.ByteString; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.common.PayloadUtils; +import org.apache.hadoop.ozone.util.PayloadUtils; import
org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -44,11 +44,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn EchoRPCRequest echoRPCRequest = getOmRequest().getEchoRPCRequest(); - byte[] payloadBytes = - PayloadUtils.generatePayloadBytes(echoRPCRequest.getPayloadSizeResp()); - + final ByteString payloadBytes = PayloadUtils.generatePayloadProto2(echoRPCRequest.getPayloadSizeResp()); EchoRPCResponse echoRPCResponse = EchoRPCResponse.newBuilder() - .setPayload(ByteString.copyFrom(payloadBytes)) + .setPayload(payloadBytes) .build(); OMResponse.Builder omResponse = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java index 8eeb7bf0e4aa..610949e0f8a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java @@ -71,7 +71,7 @@ public class ValidatorRegistry { Reflections reflections = new Reflections(new ConfigurationBuilder() .setUrls(searchUrls) .setScanners(new MethodAnnotationsScanner()) - .useParallelExecutor() + .setParallel(true) ); Set describedValidators = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index bb9562dff21a..edb13f8cf984 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -36,7 +35,6 @@ import org.apache.hadoop.ozone.om.request.key.OMDirectoriesPurgeRequestWithFSO; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.slf4j.Logger; @@ -50,7 +48,6 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Response for {@link OMDirectoriesPurgeRequestWithFSO} request. 
@@ -65,17 +62,19 @@ public class OMDirectoriesPurgeResponseWithFSO extends OmKeyResponse { private boolean isRatisEnabled; private Map, OmBucketInfo> volBucketInfoMap; private SnapshotInfo fromSnapshotInfo; + private Map openKeyInfoMap; public OMDirectoriesPurgeResponseWithFSO(@Nonnull OMResponse omResponse, @Nonnull List paths, boolean isRatisEnabled, @Nonnull BucketLayout bucketLayout, Map, OmBucketInfo> volBucketInfoMap, - SnapshotInfo fromSnapshotInfo) { + SnapshotInfo fromSnapshotInfo, Map openKeyInfoMap) { super(omResponse, bucketLayout); this.paths = paths; this.isRatisEnabled = isRatisEnabled; this.volBucketInfoMap = volBucketInfoMap; this.fromSnapshotInfo = fromSnapshotInfo; + this.openKeyInfoMap = openKeyInfoMap; } @Override @@ -86,13 +85,12 @@ public void addToDBBatch(OMMetadataManager metadataManager, ((OmMetadataManagerImpl) metadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted - rcFromSnapshotInfo = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcFromSnapshotInfo = omSnapshotManager.getSnapshot( fromSnapshotInfo.getVolumeName(), fromSnapshotInfo.getBucketName(), - getSnapshotPrefix(fromSnapshotInfo.getName()), - true)) { - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshotInfo.get(); + fromSnapshotInfo.getName())) { + OmSnapshot fromSnapshot = rcFromSnapshotInfo.get(); DBStore fromSnapshotStore = fromSnapshot.getMetadataManager() .getStore(); // Init Batch Operation for snapshot db. @@ -169,6 +167,13 @@ public void processPaths(OMMetadataManager omMetadataManager, deletedKey, repeatedOmKeyInfo); } + if (!openKeyInfoMap.isEmpty()) { + for (Map.Entry entry : openKeyInfoMap.entrySet()) { + omMetadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch( + batchOperation, entry.getKey(), entry.getValue()); + } + } + // Delete the visited directory from deleted directory table if (path.hasDeletedDir()) { omMetadataManager.getDeletedDirTable().deleteWithBatch(batchOperation, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java index 685b2969808b..664daccc8940 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java @@ -51,6 +51,8 @@ public class OMKeyCommitResponse extends OmKeyResponse { private Map keyToDeleteMap; private boolean isHSync; private OmKeyInfo newOpenKeyInfo; + private OmKeyInfo openKeyToUpdate; + private String openKeyNameToUpdate; @SuppressWarnings("checkstyle:ParameterNumber") public OMKeyCommitResponse( @@ -59,7 +61,7 @@ public OMKeyCommitResponse( @Nonnull OmBucketInfo omBucketInfo, Map keyToDeleteMap, boolean isHSync, - OmKeyInfo newOpenKeyInfo) { + OmKeyInfo newOpenKeyInfo, String openKeyNameToUpdate, OmKeyInfo openKeyToUpdate) { super(omResponse, omBucketInfo.getBucketLayout()); this.omKeyInfo = omKeyInfo; this.ozoneKeyName = ozoneKeyName; @@ -68,6 +70,8 @@ public OMKeyCommitResponse( this.keyToDeleteMap = keyToDeleteMap; this.isHSync = isHSync; this.newOpenKeyInfo = newOpenKeyInfo; + this.openKeyNameToUpdate = openKeyNameToUpdate; + this.openKeyToUpdate = openKeyToUpdate; } /** @@ -97,6 +101,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, .putWithBatch(batchOperation, ozoneKeyName, omKeyInfo); updateDeletedTable(omMetadataManager, batchOperation); 
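+    // If an open-key entry has to be refreshed (e.g. for an hsync'ed file), re-write
+    // it as part of the same batch; see handleOpenKeyToUpdate below.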
+ handleOpenKeyToUpdate(omMetadataManager, batchOperation); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, @@ -136,6 +141,14 @@ protected void updateDeletedTable(OMMetadataManager omMetadataManager, } } + protected void handleOpenKeyToUpdate(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + if (this.openKeyToUpdate != null) { + omMetadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch( + batchOperation, openKeyNameToUpdate, openKeyToUpdate); + } + } + protected boolean isHSync() { return isHSync; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java index 13034f77dfb9..29a10e1d8cde 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java @@ -55,9 +55,9 @@ public OMKeyCommitResponseWithFSO( @Nonnull OmBucketInfo omBucketInfo, Map deleteKeyMap, long volumeId, boolean isHSync, - OmKeyInfo newOpenKeyInfo) { + OmKeyInfo newOpenKeyInfo, String openKeyNameToUpdate, OmKeyInfo openKeyToUpdate) { super(omResponse, omKeyInfo, ozoneKeyName, openKeyName, - omBucketInfo, deleteKeyMap, isHSync, newOpenKeyInfo); + omBucketInfo, deleteKeyMap, isHSync, newOpenKeyInfo, openKeyNameToUpdate, openKeyToUpdate); this.volumeId = volumeId; } @@ -88,6 +88,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, getOmKeyInfo(), volumeId, getOmBucketInfo().getObjectID()); updateDeletedTable(omMetadataManager, batchOperation); + handleOpenKeyToUpdate(omMetadataManager, batchOperation); // update bucket usedBytes. 
omMetadataManager.getBucketTable().putWithBatch(batchOperation, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index 0cb0d745d19f..7cdd2dc36b2c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -45,15 +46,15 @@ public class OMKeyDeleteResponse extends AbstractOMKeyDeleteResponse { private OmKeyInfo omKeyInfo; private OmBucketInfo omBucketInfo; // If not null, this key will be deleted from OpenKeyTable - private String dbOpenKey; + private OmKeyInfo deletedOpenKeyInfo; public OMKeyDeleteResponse(@Nonnull OMResponse omResponse, @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled, - @Nonnull OmBucketInfo omBucketInfo, String dbOpenKey) { + @Nonnull OmBucketInfo omBucketInfo, OmKeyInfo deletedOpenKeyInfo) { super(omResponse, isRatisEnabled, omBucketInfo.getBucketLayout()); this.omKeyInfo = omKeyInfo; this.omBucketInfo = omBucketInfo; - this.dbOpenKey = dbOpenKey; + this.deletedOpenKeyInfo = deletedOpenKeyInfo; } /** @@ -83,10 +84,16 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), omBucketInfo.getBucketName()), omBucketInfo); - // Remove open key (necessary when the file is hsync'ed but not committed) - if (dbOpenKey != null) { - omMetadataManager.getOpenKeyTable(getBucketLayout()).deleteWithBatch( - batchOperation, dbOpenKey); + // necessary when the file is hsync'ed but not committed + // Update metadata which will be used to cleanup openKey in openKeyCleanupService + if (deletedOpenKeyInfo != null) { + String hsyncClientId = deletedOpenKeyInfo.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID); + if (hsyncClientId != null) { + String dbOpenKey = omMetadataManager.getOpenKey(deletedOpenKeyInfo.getVolumeName(), + deletedOpenKeyInfo.getBucketName(), deletedOpenKeyInfo.getKeyName(), hsyncClientId); + omMetadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch( + batchOperation, dbOpenKey, deletedOpenKeyInfo); + } } } @@ -94,11 +101,12 @@ protected OmKeyInfo getOmKeyInfo() { return omKeyInfo; } + protected OmKeyInfo getDeletedOpenKeyInfo() { + return deletedOpenKeyInfo; + } + protected OmBucketInfo getOmBucketInfo() { return omBucketInfo; } - public String getDbOpenKey() { - return dbOpenKey; - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java index f52ea1b4ce03..e428684a1695 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; +import 
org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -52,8 +53,8 @@ public class OMKeyDeleteResponseWithFSO extends OMKeyDeleteResponse { public OMKeyDeleteResponseWithFSO(@Nonnull OMResponse omResponse, @Nonnull String keyName, @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo, - @Nonnull boolean isDeleteDirectory, @Nonnull long volumeId, String dbOpenKey) { - super(omResponse, omKeyInfo, isRatisEnabled, omBucketInfo, dbOpenKey); + @Nonnull boolean isDeleteDirectory, @Nonnull long volumeId, OmKeyInfo deletedOpenKeyInfo) { + super(omResponse, omKeyInfo, isRatisEnabled, omBucketInfo, deletedOpenKeyInfo); this.keyName = keyName; this.isDeleteDirectory = isDeleteDirectory; this.volumeId = volumeId; @@ -110,9 +111,16 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getBucketKey(getOmBucketInfo().getVolumeName(), getOmBucketInfo().getBucketName()), getOmBucketInfo()); - if (getDbOpenKey() != null) { - omMetadataManager.getOpenKeyTable(getBucketLayout()).deleteWithBatch( - batchOperation, getDbOpenKey()); + // Update metadata which will be used to cleanup openKey in openKeyCleanupService + OmKeyInfo deletedOpenKeyInfo = getDeletedOpenKeyInfo(); + if (deletedOpenKeyInfo != null) { + String hsyncClientId = getDeletedOpenKeyInfo().getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID); + if (hsyncClientId != null) { + String dbOpenKey = omMetadataManager.getOpenKey(deletedOpenKeyInfo.getVolumeName(), + deletedOpenKeyInfo.getBucketName(), deletedOpenKeyInfo.getKeyName(), hsyncClientId); + omMetadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch( + batchOperation, dbOpenKey, deletedOpenKeyInfo); + } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index 4e9ee7563310..b16ba95d78f6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -29,7 +28,6 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; @@ -41,7 +39,6 @@ import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveDeletedKeysResponse.createRepeatedOmKeyInfo; /** @@ -81,14 +78,13 @@ public void 
addToDBBatch(OMMetadataManager omMetadataManager, ((OmMetadataManagerImpl) omMetadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.checkForSnapshot( + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot( fromSnapshot.getVolumeName(), fromSnapshot.getBucketName(), - getSnapshotPrefix(fromSnapshot.getName()), - true)) { + fromSnapshot.getName())) { - OmSnapshot fromOmSnapshot = (OmSnapshot) rcOmFromSnapshot.get(); + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); DBStore fromSnapshotStore = fromOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java index 23f3acd45aca..8cf7f6b9260f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java @@ -29,7 +29,9 @@ import jakarta.annotation.Nonnull; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; @@ -45,16 +47,16 @@ public class OMKeysDeleteResponse extends AbstractOMKeyDeleteResponse { private List omKeyInfoList; private OmBucketInfo omBucketInfo; - private List dbOpenKeys; + private Map openKeyInfoMap = new HashMap<>(); public OMKeysDeleteResponse(@Nonnull OMResponse omResponse, @Nonnull List keyDeleteList, boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo, - @Nonnull List dbOpenKeys) { + @Nonnull Map openKeyInfoMap) { super(omResponse, isRatisEnabled); this.omKeyInfoList = keyDeleteList; this.omBucketInfo = omBucketInfo; - this.dbOpenKeys = dbOpenKeys; + this.openKeyInfoMap = openKeyInfoMap; } /** @@ -100,9 +102,11 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), omBucketInfo.getBucketName()), omBucketInfo); - for (String dbOpenKey : dbOpenKeys) { - omMetadataManager.getOpenKeyTable(getBucketLayout()).deleteWithBatch( - batchOperation, dbOpenKey); + if (!openKeyInfoMap.isEmpty()) { + for (Map.Entry entry : openKeyInfoMap.entrySet()) { + omMetadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch( + batchOperation, entry.getKey(), entry.getValue()); + } } } @@ -114,7 +118,7 @@ public OmBucketInfo getOmBucketInfo() { return omBucketInfo; } - public List getDbOpenKeys() { - return dbOpenKeys; + protected Map getOpenKeyInfoMap() { + return openKeyInfoMap; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponseWithFSO.java index 3a662727b023..861204c3938a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponseWithFSO.java @@ -29,6 +29,7 @@ import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; +import java.util.Map; import static 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; @@ -52,8 +53,8 @@ public OMKeysDeleteResponseWithFSO( @Nonnull List keyDeleteList, @Nonnull List dirDeleteList, boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo, @Nonnull long volId, - @Nonnull List dbOpenKeys) { - super(omResponse, keyDeleteList, isRatisEnabled, omBucketInfo, dbOpenKeys); + @Nonnull Map openKeyInfoMap) { + super(omResponse, keyDeleteList, isRatisEnabled, omBucketInfo, openKeyInfoMap); this.dirsList = dirDeleteList; this.volumeId = volId; } @@ -95,9 +96,11 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getBucketKey(getOmBucketInfo().getVolumeName(), getOmBucketInfo().getBucketName()), getOmBucketInfo()); - for (String dbOpenKey : getDbOpenKeys()) { - omMetadataManager.getOpenKeyTable(getBucketLayout()).deleteWithBatch( - batchOperation, dbOpenKey); + if (!getOpenKeyInfoMap().isEmpty()) { + for (Map.Entry entry : getOpenKeyInfoMap().entrySet()) { + omMetadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch( + batchOperation, entry.getKey(), entry.getValue()); + } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index 3e390b0288ec..a1f7b796cd8a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -32,7 +32,6 @@ .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.CheckForNull; import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; @@ -58,7 +57,6 @@ public class S3MultipartUploadCompleteResponse extends OmKeyResponse { private List allKeyInfoToRemove; private OmBucketInfo omBucketInfo; - @SuppressWarnings("checkstyle:ParameterNumber") public S3MultipartUploadCompleteResponse( @Nonnull OMResponse omResponse, @Nonnull String multipartKey, @@ -66,7 +64,7 @@ public S3MultipartUploadCompleteResponse( @Nonnull OmKeyInfo omKeyInfo, @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, - @CheckForNull OmBucketInfo omBucketInfo) { + OmBucketInfo omBucketInfo) { super(omResponse, bucketLayout); this.allKeyInfoToRemove = allKeyInfoToRemove; this.multipartKey = multipartKey; @@ -131,4 +129,12 @@ protected String addToKeyTable(OMMetadataManager omMetadataManager, protected OmKeyInfo getOmKeyInfo() { return omKeyInfo; } + + protected OmBucketInfo getOmBucketInfo() { + return omBucketInfo; + } + + protected String getMultiPartKey() { + return multipartKey; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java index 29edfe382533..4d1a6ce09bc1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java @@ -22,12 +22,13 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.CheckForNull; import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; @@ -53,6 +54,10 @@ public class S3MultipartUploadCompleteResponseWithFSO private long volumeId; private long bucketId; + private List missingParentInfos; + + private OmMultipartKeyInfo multipartKeyInfo; + @SuppressWarnings("checkstyle:ParameterNumber") public S3MultipartUploadCompleteResponseWithFSO( @Nonnull OMResponse omResponse, @@ -61,12 +66,16 @@ public S3MultipartUploadCompleteResponseWithFSO( @Nonnull OmKeyInfo omKeyInfo, @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, - @CheckForNull OmBucketInfo omBucketInfo, - @Nonnull long volumeId, @Nonnull long bucketId) { + OmBucketInfo omBucketInfo, + @Nonnull long volumeId, @Nonnull long bucketId, + List missingParentInfos, + OmMultipartKeyInfo multipartKeyInfo) { super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, allKeyInfoToRemove, bucketLayout, omBucketInfo); this.volumeId = volumeId; this.bucketId = bucketId; + this.missingParentInfos = missingParentInfos; + this.multipartKeyInfo = multipartKeyInfo; } /** @@ -79,6 +88,39 @@ public S3MultipartUploadCompleteResponseWithFSO( checkStatusNotOK(); } + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + if (missingParentInfos != null) { + // Create missing parent directory entries. + for (OmDirectoryInfo parentDirInfo : missingParentInfos) { + final String parentKey = omMetadataManager.getOzonePathKey( + volumeId, bucketId, parentDirInfo.getParentObjectID(), + parentDirInfo.getName()); + omMetadataManager.getDirectoryTable().putWithBatch(batchOperation, + parentKey, parentDirInfo); + } + + // namespace quota changes for parent directory + String bucketKey = omMetadataManager.getBucketKey( + getOmBucketInfo().getVolumeName(), + getOmBucketInfo().getBucketName()); + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + bucketKey, getOmBucketInfo()); + + if (OMFileRequest.getOmKeyInfoFromFileTable(true, + omMetadataManager, getMultiPartKey(), getOmKeyInfo().getKeyName()) + != null) { + // Add multi part to open key table. 
+ OMFileRequest.addToOpenFileTableForMultipart(omMetadataManager, + batchOperation, + getOmKeyInfo(), multipartKeyInfo.getUploadID(), volumeId, + bucketId); + } + } + super.addToDBBatch(omMetadataManager, batchOperation); + } + @Override protected String addToKeyTable(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index 1255e4ae7f41..3726faacfd70 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -32,7 +31,6 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -42,7 +40,6 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Response for OMSnapshotMoveDeletedKeysRequest. @@ -93,24 +90,22 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, ((OmMetadataManagerImpl) omMetadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.checkForSnapshot( + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot( fromSnapshot.getVolumeName(), fromSnapshot.getBucketName(), - getSnapshotPrefix(fromSnapshot.getName()), - true)) { + fromSnapshot.getName())) { - OmSnapshot fromOmSnapshot = (OmSnapshot) rcOmFromSnapshot.get(); + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); if (nextSnapshot != null) { - try (ReferenceCounted - rcOmNextSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcOmNextSnapshot = omSnapshotManager.getSnapshot( nextSnapshot.getVolumeName(), nextSnapshot.getBucketName(), - getSnapshotPrefix(nextSnapshot.getName()), - true)) { + nextSnapshot.getName())) { - OmSnapshot nextOmSnapshot = (OmSnapshot) rcOmNextSnapshot.get(); + OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); RDBStore nextSnapshotStore = (RDBStore) nextOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. 
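// A minimal sketch of the reference-counted snapshot access pattern used above
// (assumes the same OmSnapshotManager, ReferenceCounted and OmSnapshot types as in this patch):
//
//   try (ReferenceCounted<OmSnapshot> rcOmSnapshot =
//            omSnapshotManager.getSnapshot(volumeName, bucketName, snapshotName)) {
//     OmSnapshot omSnapshot = rcOmSnapshot.get();   // typed handle, no cast required
//     DBStore snapshotStore = omSnapshot.getMetadataManager().getStore();
//     // read (or batch-write) against the snapshot DB while the reference is held
//   }   // close() releases the reference; the cache may then evict the entry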
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index b8db58d7fd9e..d300601b3858 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -80,12 +80,13 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) omMetadataManager; - updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); updateSnapInfo(metadataManager, batchOperation, updatedPreviousAndGlobalSnapInfos); + updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); for (String dbKey: snapshotDbKeys) { + // Skip the cache here because snapshot is purged from cache in OMSnapshotPurgeRequest. SnapshotInfo snapshotInfo = omMetadataManager - .getSnapshotInfoTable().get(dbKey); + .getSnapshotInfoTable().getSkipCache(dbKey); // Even though snapshot existed when SnapshotDeletingService // was running. It might be deleted in the previous run and // the DB might not have been updated yet. So snapshotInfo @@ -96,8 +97,7 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, // Delete Snapshot checkpoint directory. deleteCheckpointDirectory(omMetadataManager, snapshotInfo); - omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, - dbKey); + omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java new file mode 100644 index 000000000000..05bb16a8f514 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.response.snapshot; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; + +import jakarta.annotation.Nonnull; +import java.io.IOException; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +/** + * Response for OMSnapshotRenameRequest. + */ +@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE}) +public class OMSnapshotRenameResponse extends OMClientResponse { + + private String snapshotOldName; + private String snapshotNewName; + private SnapshotInfo renamedInfo; + + public OMSnapshotRenameResponse(OzoneManagerProtocolProtos.OMResponse omResponse, + String snapshotOldName, String snapshotNewName, + @Nonnull SnapshotInfo renamedInfo) { + super(omResponse); + this.snapshotOldName = snapshotOldName; + this.snapshotNewName = snapshotNewName; + this.renamedInfo = renamedInfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public OMSnapshotRenameResponse(@Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) + throws IOException { + omMetadataManager.getSnapshotInfoTable() + .putWithBatch(batchOperation, snapshotNewName, renamedInfo); + omMetadataManager.getSnapshotInfoTable() + .deleteWithBatch(batchOperation, snapshotOldName); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 21ad0872769a..429e286287c1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -116,7 +116,10 @@ protected int processKeyDeletes(List keyBlocksList, } List blockDeletionResults = scmClient.deleteKeyBlocks(keyBlocksList); + LOG.info("{} BlockGroup deletion are acked by SCM in {} ms", + keyBlocksList.size(), Time.monotonicNow() - startTime); if (blockDeletionResults != null) { + startTime = Time.monotonicNow(); if (isRatisEnabled()) { delCount = submitPurgeKeysRequest(blockDeletionResults, keysToModify, snapTableKey); @@ -126,11 +129,8 @@ protected int processKeyDeletes(List keyBlocksList, // OMRequest model. delCount = deleteAllKeys(blockDeletionResults, manager); } - if (LOG.isDebugEnabled()) { - LOG.debug("Blocks for {} (out of {}) keys are deleted in {} ms", - delCount, blockDeletionResults.size(), - Time.monotonicNow() - startTime); - } + LOG.info("Blocks for {} (out of {}) keys are deleted from DB in {} ms", + delCount, blockDeletionResults.size(), Time.monotonicNow() - startTime); } return delCount; } @@ -277,12 +277,14 @@ protected RaftClientRequest createRaftClientRequestForPurge( * Parse Volume and Bucket Name from ObjectKey and add it to given map of * keys to be purged per bucket. 
*/ - private void addToMap(Map, List> map, - String objectKey) { + private void addToMap(Map, List> map, String objectKey) { // Parse volume and bucket name String[] split = objectKey.split(OM_KEY_PREFIX); - Preconditions.assertTrue(split.length > 3, "Volume and/or Bucket Name " + - "missing from Key Name."); + Preconditions.assertTrue(split.length >= 3, "Volume and/or Bucket Name " + + "missing from Key Name " + objectKey); + if (split.length == 3) { + LOG.warn("{} missing Key Name", objectKey); + } Pair volumeBucketPair = Pair.of(split[1], split[2]); if (!map.containsKey(volumeBucketPair)) { map.put(volumeBucketPair, new ArrayList<>()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index 9643fa82969c..d7205b2c1bbf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -35,7 +34,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; @@ -238,7 +236,7 @@ private boolean previousSnapshotHasDir( OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) getOzoneManager().getMetadataManager(); - try (ReferenceCounted rcLatestSnapshot = + try (ReferenceCounted rcLatestSnapshot = metadataManager.getLatestActiveSnapshot( deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), @@ -249,11 +247,9 @@ private boolean previousSnapshotHasDir( .getRenameKey(deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), deletedDirInfo.getObjectID()); Table prevDirTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDirectoryTable(); + rcLatestSnapshot.get().getMetadataManager().getDirectoryTable(); Table prevDeletedDirTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDeletedDirTable(); + rcLatestSnapshot.get().getMetadataManager().getDeletedDirTable(); OmKeyInfo prevDeletedDirInfo = prevDeletedDirTable.get(key); if (prevDeletedDirInfo != null) { return true; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index e89608e82db2..83991668c9f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; import 
org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -46,7 +45,6 @@ import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; @@ -58,7 +56,6 @@ import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; @@ -264,13 +261,12 @@ private void processSnapshotDeepClean(int delCount) continue; } - try (ReferenceCounted - rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcCurrOmSnapshot = omSnapshotManager.getSnapshot( currSnapInfo.getVolumeName(), currSnapInfo.getBucketName(), - getSnapshotPrefix(currSnapInfo.getName()), - true)) { - OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get(); + currSnapInfo.getName())) { + OmSnapshot currOmSnapshot = rcCurrOmSnapshot.get(); Table snapDeletedTable = currOmSnapshot.getMetadataManager().getDeletedTable(); @@ -304,18 +300,16 @@ private void processSnapshotDeepClean(int delCount) Table previousKeyTable = null; Table prevRenamedTable = null; - ReferenceCounted - rcPrevOmSnapshot = null; + ReferenceCounted rcPrevOmSnapshot = null; // Split RepeatedOmKeyInfo and update current snapshot // deletedKeyTable and next snapshot deletedKeyTable. 
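+            // The previous snapshot's key and renamed-key tables are loaded below so
+            // that keys still referenced by that snapshot are not reclaimed by this
+            // deep-clean pass.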
if (previousSnapshot != null) { - rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevOmSnapshot = omSnapshotManager.getSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), true); - OmSnapshot omPreviousSnapshot = (OmSnapshot) - rcPrevOmSnapshot.get(); + previousSnapshot.getName()); + OmSnapshot omPreviousSnapshot = rcPrevOmSnapshot.get(); previousKeyTable = omPreviousSnapshot.getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); @@ -324,15 +318,13 @@ private void processSnapshotDeepClean(int delCount) } Table previousToPrevKeyTable = null; - ReferenceCounted - rcPrevToPrevOmSnapshot = null; + ReferenceCounted rcPrevToPrevOmSnapshot = null; if (previousToPrevSnapshot != null) { - rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevToPrevOmSnapshot = omSnapshotManager.getSnapshot( previousToPrevSnapshot.getVolumeName(), previousToPrevSnapshot.getBucketName(), - getSnapshotPrefix(previousToPrevSnapshot.getName()), true); - OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot) - rcPrevToPrevOmSnapshot.get(); + previousToPrevSnapshot.getName()); + OmSnapshot omPreviousToPrevSnapshot = rcPrevToPrevOmSnapshot.get(); previousToPrevKeyTable = omPreviousToPrevSnapshot .getMetadataManager() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index cc275b4e8e6a..29b2b319532b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.KeyManagerImpl; @@ -52,7 +51,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveDeletedKeysRequest; @@ -78,7 +76,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Background Service to clean-up deleted snapshot and reclaim space. 
@@ -143,10 +140,8 @@ public BackgroundTaskResult call() throws InterruptedException { getRunCount().incrementAndGet(); - ReferenceCounted rcOmSnapshot = - null; - ReferenceCounted rcOmPreviousSnapshot = - null; + ReferenceCounted rcOmSnapshot = null; + ReferenceCounted rcOmPreviousSnapshot = null; Table snapshotInfoTable = ozoneManager.getMetadataManager().getSnapshotInfoTable(); @@ -169,12 +164,11 @@ public BackgroundTaskResult call() throws InterruptedException { // Note: Can refactor this to use try-with-resources. // Handling RC decrements manually for now to minimize conflicts. - rcOmSnapshot = omSnapshotManager.checkForSnapshot( + rcOmSnapshot = omSnapshotManager.getSnapshot( snapInfo.getVolumeName(), snapInfo.getBucketName(), - getSnapshotPrefix(snapInfo.getName()), - true); - OmSnapshot omSnapshot = (OmSnapshot) rcOmSnapshot.get(); + snapInfo.getName()); + OmSnapshot omSnapshot = rcOmSnapshot.get(); Table snapshotDeletedTable = omSnapshot.getMetadataManager().getDeletedTable(); @@ -226,12 +220,11 @@ public BackgroundTaskResult call() throws InterruptedException { // Split RepeatedOmKeyInfo and update current snapshot deletedKeyTable // and next snapshot deletedKeyTable. if (previousSnapshot != null) { - rcOmPreviousSnapshot = omSnapshotManager.checkForSnapshot( + rcOmPreviousSnapshot = omSnapshotManager.getSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), - true); - omPreviousSnapshot = (OmSnapshot) rcOmPreviousSnapshot.get(); + previousSnapshot.getName()); + omPreviousSnapshot = rcOmPreviousSnapshot.get(); previousKeyTable = omPreviousSnapshot .getMetadataManager().getKeyTable(bucketInfo.getBucketLayout()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index 9a60f6303861..fe0f6e111ed3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -44,7 +43,6 @@ import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; @@ -63,7 +61,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; import static 
org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; @@ -158,10 +155,8 @@ public BackgroundTaskResult call() { continue; } - ReferenceCounted - rcPrevOmSnapshot = null; - ReferenceCounted - rcPrevToPrevOmSnapshot = null; + ReferenceCounted rcPrevOmSnapshot = null; + ReferenceCounted rcPrevToPrevOmSnapshot = null; try { long volumeId = metadataManager .getVolumeId(currSnapInfo.getVolumeName()); @@ -189,12 +184,11 @@ public BackgroundTaskResult call() { Table prevRenamedTable = null; if (previousSnapshot != null) { - rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevOmSnapshot = omSnapshotManager.getActiveSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), false); - OmSnapshot omPreviousSnapshot = (OmSnapshot) - rcPrevOmSnapshot.get(); + previousSnapshot.getName()); + OmSnapshot omPreviousSnapshot = rcPrevOmSnapshot.get(); previousKeyTable = omPreviousSnapshot.getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); @@ -206,12 +200,11 @@ public BackgroundTaskResult call() { Table previousToPrevKeyTable = null; if (previousToPrevSnapshot != null) { - rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevToPrevOmSnapshot = omSnapshotManager.getActiveSnapshot( previousToPrevSnapshot.getVolumeName(), previousToPrevSnapshot.getBucketName(), - getSnapshotPrefix(previousToPrevSnapshot.getName()), false); - OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot) - rcPrevToPrevOmSnapshot.get(); + previousToPrevSnapshot.getName()); + OmSnapshot omPreviousToPrevSnapshot = rcPrevToPrevOmSnapshot.get(); previousToPrevKeyTable = omPreviousToPrevSnapshot .getMetadataManager() @@ -220,14 +213,13 @@ public BackgroundTaskResult call() { String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager, currSnapInfo.getVolumeName(), currSnapInfo.getBucketName()); - try (ReferenceCounted - rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcCurrOmSnapshot = omSnapshotManager.getActiveSnapshot( currSnapInfo.getVolumeName(), currSnapInfo.getBucketName(), - getSnapshotPrefix(currSnapInfo.getName()), - false)) { + currSnapInfo.getName())) { - OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get(); + OmSnapshot currOmSnapshot = rcCurrOmSnapshot.get(); Table snapDeletedDirTable = currOmSnapshot.getMetadataManager().getDeletedDirTable(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java index 808a5ed4c192..97e19eb969d8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java @@ -25,7 +25,7 @@ /** * Add reference counter to an object instance. */ -public class ReferenceCounted +public class ReferenceCounted implements AutoCloseable { /** @@ -51,10 +51,10 @@ public class ReferenceCounted /** * Parent instance whose callback will be triggered upon this RC closure. */ - private final U parentWithCallback; + private final ReferenceCountedCallback parentWithCallback; public ReferenceCounted(T obj, boolean disableCounter, - U parentWithCallback) { + ReferenceCountedCallback parentWithCallback) { // A param to allow disabling ref counting to reduce active DB // access penalties due to AtomicLong operations. 
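+    // parentWithCallback is notified via callback(this) when the reference count
+    // drops back to zero (see decrementRefCount), e.g. so the owning SnapshotCache
+    // can queue this entry for eviction.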
this.obj = obj; @@ -126,7 +126,9 @@ public long decrementRefCount() { Preconditions.checkState(newValTotal >= 0L, "Total reference count underflow"); } - + if (refCount.get() == 0) { + this.parentWithCallback.callback(this); + } return refCount.get(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCountedCallback.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCountedCallback.java new file mode 100644 index 000000000000..d63f5783b808 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCountedCallback.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.snapshot; + +/** + * Callback interface for ReferenceCounted. + */ +public interface ReferenceCountedCallback { + void callback(ReferenceCounted referenceCounted); +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java index 226acbb7dd1b..035fc80d3468 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java @@ -19,52 +19,65 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheLoader; -import org.apache.hadoop.ozone.om.IOmMetadataReader; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.hdds.utils.Scheduler; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.Iterator; -import java.util.Map; +import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; /** * Thread-safe custom unbounded LRU cache to manage open snapshot DB instances. */ -public class SnapshotCache { +public class SnapshotCache implements ReferenceCountedCallback, AutoCloseable { static final Logger LOG = LoggerFactory.getLogger(SnapshotCache.class); // Snapshot cache internal hash map. 
- // Key: DB snapshot table key + // Key: SnapshotId // Value: OmSnapshot instance, each holds a DB instance handle inside // TODO: [SNAPSHOT] Consider wrapping SoftReference<> around IOmMetadataReader - private final ConcurrentHashMap> dbMap; + private final ConcurrentHashMap> dbMap; + + private final CacheLoader cacheLoader; - private final OmSnapshotManager omSnapshotManager; - private final CacheLoader cacheLoader; // Soft-limit of the total number of snapshot DB instances allowed to be // opened on the OM. private final int cacheSizeLimit; + private final Set pendingEvictionQueue; + private final Scheduler scheduler; + private static final String SNAPSHOT_CACHE_CLEANUP_SERVICE = + "SnapshotCacheCleanupService"; + + private final OMMetrics omMetrics; - public SnapshotCache( - OmSnapshotManager omSnapshotManager, - CacheLoader cacheLoader, - int cacheSizeLimit) { + public SnapshotCache(CacheLoader cacheLoader, int cacheSizeLimit, OMMetrics omMetrics, + long cleanupInterval) { this.dbMap = new ConcurrentHashMap<>(); - this.omSnapshotManager = omSnapshotManager; this.cacheLoader = cacheLoader; this.cacheSizeLimit = cacheSizeLimit; + this.omMetrics = omMetrics; + this.pendingEvictionQueue = ConcurrentHashMap.newKeySet(); + if (cleanupInterval > 0) { + this.scheduler = new Scheduler(SNAPSHOT_CACHE_CLEANUP_SERVICE, + true, 1); + this.scheduler.scheduleWithFixedDelay(this::cleanup, cleanupInterval, + cleanupInterval, TimeUnit.MILLISECONDS); + } else { + this.scheduler = null; + } } @VisibleForTesting - ConcurrentHashMap> getDbMap() { + ConcurrentHashMap> getDbMap() { return dbMap; } @@ -77,18 +90,19 @@ public int size() { /** * Immediately invalidate an entry. - * @param key DB snapshot table key + * @param key SnapshotId */ - public void invalidate(String key) throws IOException { + public void invalidate(UUID key) { dbMap.compute(key, (k, v) -> { if (v == null) { - LOG.warn("Key: '{}' does not exist in cache.", k); + LOG.warn("SnapshotId: '{}' does not exist in snapshot cache.", k); } else { try { - ((OmSnapshot) v.get()).close(); + v.get().close(); } catch (IOException e) { - throw new IllegalStateException("Failed to close snapshot: " + key, e); + throw new IllegalStateException("Failed to close snapshotId: " + key, e); } + omMetrics.decNumSnapshotCacheSize(); } return null; }); @@ -98,19 +112,16 @@ public void invalidate(String key) throws IOException { * Immediately invalidate all entries and close their DB instances in cache. */ public void invalidateAll() { - Iterator>> - it = dbMap.entrySet().iterator(); + for (UUID key : dbMap.keySet()) { + invalidate(key); + } + } - while (it.hasNext()) { - Map.Entry> entry = it.next(); - OmSnapshot omSnapshot = (OmSnapshot) entry.getValue().get(); - try { - // TODO: If wrapped with SoftReference<>, omSnapshot could be null? - omSnapshot.close(); - } catch (IOException e) { - throw new IllegalStateException("Failed to close snapshot", e); - } - it.remove(); + @Override + public void close() { + invalidateAll(); + if (this.scheduler != null) { + this.scheduler.close(); } } @@ -120,31 +131,31 @@ public void invalidateAll() { */ public enum Reason { FS_API_READ, - SNAPDIFF_READ, + SNAP_DIFF_READ, DEEP_CLEAN_WRITE, GARBAGE_COLLECTION_WRITE } - public ReferenceCounted get(String key) throws IOException { - return get(key, false); - } - /** * Get or load OmSnapshot. Shall be close()d after use. * TODO: [SNAPSHOT] Can add reason enum to param list later. 
- * @param key snapshot table key + * @param key SnapshotId * @return an OmSnapshot instance, or null on error */ - public ReferenceCounted get(String key, boolean skipActiveCheck) - throws IOException { + public ReferenceCounted get(UUID key) throws IOException { + // Warn if actual cache size exceeds the soft limit already. + if (size() > cacheSizeLimit) { + LOG.warn("Snapshot cache size ({}) exceeds configured soft-limit ({}).", + size(), cacheSizeLimit); + } // Atomic operation to initialize the OmSnapshot instance (once) if the key // does not exist, and increment the reference count on the instance. - ReferenceCounted rcOmSnapshot = + ReferenceCounted rcOmSnapshot = dbMap.compute(key, (k, v) -> { if (v == null) { - LOG.info("Loading snapshot. Table key: {}", k); + LOG.info("Loading SnapshotId: '{}'", k); try { - v = new ReferenceCounted<>(cacheLoader.load(k), false, this); + v = new ReferenceCounted<>(cacheLoader.load(key), false, this); } catch (OMException omEx) { // Return null if the snapshot is no longer active if (!omEx.getResult().equals(FILE_NOT_FOUND)) { @@ -157,6 +168,7 @@ public ReferenceCounted get(String key, boolea // Unexpected and unknown exception thrown from CacheLoader#load throw new IllegalStateException(ex); } + omMetrics.incNumSnapshotCacheSize(); } if (v != null) { // When RC OmSnapshot is successfully loaded @@ -164,101 +176,74 @@ public ReferenceCounted get(String key, boolea } return v; }); - if (rcOmSnapshot == null) { // The only exception that would fall through the loader logic above // is OMException with FILE_NOT_FOUND. - throw new OMException("Snapshot table key '" + key + "' not found, " - + "or the snapshot is no longer active", + throw new OMException("SnapshotId: '" + key + "' not found, or the snapshot is no longer active.", OMException.ResultCodes.FILE_NOT_FOUND); } - - // If the snapshot is already loaded in cache, the check inside the loader - // above is ignored. But we would still want to reject all get()s except - // when called from SDT (and some) if the snapshot is not active anymore. - if (!skipActiveCheck && !omSnapshotManager.isSnapshotStatus(key, SNAPSHOT_ACTIVE)) { - // Ref count was incremented. Need to decrement on exception here. - rcOmSnapshot.decrementRefCount(); - throw new OMException("Unable to load snapshot. " + - "Snapshot with table key '" + key + "' is no longer active", - FILE_NOT_FOUND); - } - - // Check if any entries can be cleaned up. - // At this point, cache size might temporarily exceed cacheSizeLimit - // even if there are entries that can be evicted, which is fine since it - // is a soft limit. - cleanup(); - return rcOmSnapshot; } /** * Release the reference count on the OmSnapshot instance. - * @param key snapshot table key + * @param key SnapshotId */ - public void release(String key) { - dbMap.compute(key, (k, v) -> { - if (v == null) { - throw new IllegalArgumentException("Key '" + key + "' does not exist in cache."); - } else { - v.decrementRefCount(); - } - return v; - }); - - // The cache size might have already exceeded the soft limit - // Thus triggering cleanup() to check and evict if applicable - cleanup(); + public void release(UUID key) { + ReferenceCounted val = dbMap.get(key); + if (val == null) { + throw new IllegalArgumentException("Key '" + key + "' does not " + + "exist in cache."); + } + val.decrementRefCount(); } - /** - * Alternatively, can release with OmSnapshot instance directly. 
- * @param omSnapshot OmSnapshot - */ - public void release(OmSnapshot omSnapshot) { - final String snapshotTableKey = omSnapshot.getSnapshotTableKey(); - release(snapshotTableKey); - } /** - * Wrapper for cleanupInternal() that is synchronized to prevent multiple - * threads from interleaving into the cleanup method. + * If cache size exceeds soft limit, attempt to clean up and close the + instances that has zero reference count. */ - private synchronized void cleanup() { + @VisibleForTesting + void cleanup() { if (dbMap.size() > cacheSizeLimit) { - cleanupInternal(); + for (UUID evictionKey : pendingEvictionQueue) { + dbMap.compute(evictionKey, (k, v) -> { + pendingEvictionQueue.remove(k); + if (v == null) { + throw new IllegalStateException("SnapshotId '" + k + "' does not exist in cache. The RocksDB " + + "instance of the Snapshot may not be closed properly."); + } + + if (v.getTotalRefCount() > 0) { + LOG.debug("SnapshotId {} is still being referenced ({}), skipping its clean up.", k, v.getTotalRefCount()); + return v; + } else { + LOG.debug("Closing SnapshotId {}. It is not being referenced anymore.", k); + // Close the instance, which also closes its DB handle. + try { + v.get().close(); + } catch (IOException ex) { + throw new IllegalStateException("Error while closing snapshot DB.", ex); + } + omMetrics.decNumSnapshotCacheSize(); + return null; + } + }); + } } } /** - * If cache size exceeds soft limit, attempt to clean up and close the - * instances that has zero reference count. - * TODO: [SNAPSHOT] Add new ozone debug CLI command to trigger this directly. + * Callback method used to enqueue or dequeue ReferenceCounted from + * pendingEvictionList. + * @param referenceCounted ReferenceCounted object */ - private void cleanupInternal() { - for (Map.Entry> entry : dbMap.entrySet()) { - dbMap.compute(entry.getKey(), (k, v) -> { - if (v == null) { - throw new IllegalStateException("Key '" + k + "' does not exist in cache. The RocksDB " + - "instance of the Snapshot may not be closed properly."); - } - - if (v.getTotalRefCount() > 0) { - LOG.debug("Snapshot {} is still being referenced ({}), skipping its clean up", - k, v.getTotalRefCount()); - return v; - } else { - LOG.debug("Closing Snapshot {}. It is not being referenced anymore.", k); - // Close the instance, which also closes its DB handle. 
- try { - ((OmSnapshot) v.get()).close(); - } catch (IOException ex) { - throw new IllegalStateException("Error while closing snapshot DB", ex); - } - return null; - } - }); + @Override + public void callback(ReferenceCounted referenceCounted) { + if (referenceCounted.getTotalRefCount() == 0L) { + // Reference count reaches zero, add to pendingEvictionList + pendingEvictionQueue.add(((OmSnapshot) referenceCounted.get()) + .getSnapshotID()); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 41e990097ecd..8fd239ef5318 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -25,19 +25,16 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -89,7 +86,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; @@ -112,9 +108,10 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; @@ -150,7 +147,6 @@ public class SnapshotDiffManager implements AutoCloseable { private final ManagedRocksDB db; private final RocksDBCheckpointDiffer differ; private 
final OzoneManager ozoneManager; - private final SnapshotCache snapshotCache; private final CodecRegistry codecRegistry; private final ManagedColumnFamilyOptions familyOptions; // TODO: [SNAPSHOT] Use different wait time based of job status. @@ -185,9 +181,7 @@ public class SnapshotDiffManager implements AutoCloseable { private final boolean diffDisableNativeLibs; - private final Optional sstDumpTool; - - private Optional sstDumpToolExecService; + private final boolean isNativeLibsLoaded; private final BiFunction generateSnapDiffJobKey = @@ -199,7 +193,6 @@ public class SnapshotDiffManager implements AutoCloseable { public SnapshotDiffManager(ManagedRocksDB db, RocksDBCheckpointDiffer differ, OzoneManager ozoneManager, - SnapshotCache snapshotCache, ColumnFamilyHandle snapDiffJobCfh, ColumnFamilyHandle snapDiffReportCfh, ManagedColumnFamilyOptions familyOptions, @@ -207,7 +200,6 @@ public SnapshotDiffManager(ManagedRocksDB db, this.db = db; this.differ = differ; this.ozoneManager = ozoneManager; - this.snapshotCache = snapshotCache; this.familyOptions = familyOptions; this.codecRegistry = codecRegistry; this.defaultWaitTime = ozoneManager.getConfiguration().getTimeDuration( @@ -264,7 +256,7 @@ public SnapshotDiffManager(ManagedRocksDB db, createEmptySnapDiffDir(path); this.sstBackupDirForSnapDiffJobs = path.toString(); - this.sstDumpTool = initSSTDumpTool(ozoneManager.getConfiguration()); + this.isNativeLibsLoaded = initNativeLibraryForEfficientDiff(ozoneManager.getConfiguration()); // Ideally, loadJobsOnStartUp should run only on OM node, since SnapDiff // is not HA currently and running this on all the nodes would be @@ -287,35 +279,16 @@ public PersistentMap getSnapDiffJobTable() { return snapDiffJobTable; } - private Optional initSSTDumpTool( - final OzoneConfiguration conf) { - if (conf.getBoolean(OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, - OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) { + private boolean initNativeLibraryForEfficientDiff(final OzoneConfiguration conf) { + if (conf.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) { try { - int threadPoolSize = conf.getInt( - OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE, - OMConfigKeys - .OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT); - int bufferSize = (int) conf.getStorageSize( - OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE, - OMConfigKeys - .OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT, - StorageUnit.BYTES); - this.sstDumpToolExecService = Optional.of(new ThreadPoolExecutor(0, - threadPoolSize, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder() - .setNameFormat(ozoneManager.getThreadNamePrefix() + - "snapshot-diff-manager-sst-dump-tool-TID-%d") - .build(), - new ThreadPoolExecutor.DiscardPolicy())); - return Optional.of(new ManagedSSTDumpTool(sstDumpToolExecService.get(), - bufferSize)); + return ManagedRawSSTFileReader.loadLibrary(); } catch (NativeLibraryNotLoadedException e) { - this.sstDumpToolExecService.ifPresent(exec -> - closeExecutorService(exec, "SstDumpToolExecutor")); + LOG.error("Native Library for raw sst file reading loading failed.", e); + return false; } } - return Optional.empty(); + return false; } /** @@ -832,8 +805,8 @@ void generateSnapshotDiffReport(final String jobKey, // job by RocksDBCheckpointDiffer#pruneOlderSnapshotsWithCompactionHistory. 
Path path = Paths.get(sstBackupDirForSnapDiffJobs + "/" + jobId); - ReferenceCounted rcFromSnapshot = null; - ReferenceCounted rcToSnapshot = null; + ReferenceCounted rcFromSnapshot = null; + ReferenceCounted rcToSnapshot = null; try { if (!areDiffJobAndSnapshotsActive(volumeName, bucketName, @@ -841,14 +814,15 @@ void generateSnapshotDiffReport(final String jobKey, return; } - String fsKey = getTableKey(volumeName, bucketName, fromSnapshotName); - String tsKey = getTableKey(volumeName, bucketName, toSnapshotName); - - rcFromSnapshot = snapshotCache.get(fsKey); - rcToSnapshot = snapshotCache.get(tsKey); + rcFromSnapshot = + ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, fromSnapshotName); + rcToSnapshot = + ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, toSnapshotName); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fsInfo = getSnapshotInfo(ozoneManager, volumeName, bucketName, fromSnapshotName); SnapshotInfo tsInfo = getSnapshotInfo(ozoneManager, @@ -1056,12 +1030,12 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( // Workaround to handle deletes if native rocksDb tool for reading // tombstone is not loaded. // TODO: [SNAPSHOT] Update Rocksdb SSTFileIterator to read tombstone - if (skipNativeDiff || !sstDumpTool.isPresent()) { + if (skipNativeDiff || !isNativeLibsLoaded) { deltaFiles.addAll(getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp)); } addToObjectIdMap(fsTable, tsTable, deltaFiles, - !skipNativeDiff && sstDumpTool.isPresent(), + !skipNativeDiff && isNativeLibsLoaded, oldObjIdToKeyMap, newObjIdToKeyMap, objectIdToIsDirMap, oldParentIds, newParentIds, tablePrefixes); } @@ -1092,12 +1066,9 @@ void addToObjectIdMap(Table fsTable, upperBoundCharArray[upperBoundCharArray.length - 1] += 1; sstFileReaderUpperBound = String.valueOf(upperBoundCharArray); } - try (Stream keysToCheck = - nativeRocksToolsLoaded && sstDumpTool.isPresent() - ? sstFileReader.getKeyStreamWithTombstone(sstDumpTool.get(), - sstFileReaderLowerBound, sstFileReaderUpperBound) - : sstFileReader.getKeyStream(sstFileReaderLowerBound, - sstFileReaderUpperBound)) { + try (Stream keysToCheck = nativeRocksToolsLoaded ? 
+ sstFileReader.getKeyStreamWithTombstone(sstFileReaderLowerBound, sstFileReaderUpperBound) + : sstFileReader.getKeyStream(sstFileReaderLowerBound, sstFileReaderUpperBound)) { keysToCheck.forEach(key -> { try { final WithParentObjectId fromObjectId = fsTable.get(key); @@ -1444,7 +1415,7 @@ long generateDiffReport( */ private boolean isKeyModified(OmKeyInfo fromKey, OmKeyInfo toKey) { return !fromKey.isKeyInfoSame(toKey, - false, false, false, false) + false, false, false, false, true) || !SnapshotDeletingService.isBlockLocationInfoSame( fromKey, toKey); } @@ -1678,8 +1649,6 @@ public void close() { if (snapDiffExecutor != null) { closeExecutorService(snapDiffExecutor, "SnapDiffExecutor"); } - this.sstDumpToolExecService.ifPresent(exec -> - closeExecutorService(exec, "SstDumpToolExecutor")); } private void closeExecutorService(ExecutorService executorService, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 89823995d0cd..2041fa791a76 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -40,6 +40,7 @@ import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; @@ -148,6 +149,10 @@ public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot // is removed in-memory but OMDoubleBuffer has not flushed yet. + if (snapInfo == null) { + throw new OMException("Snapshot Info is null. 
Cannot get the next snapshot", INVALID_SNAPSHOT_ERROR); + } + try { while (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId())) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java index d08a0009e36e..f1e9c819e709 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java @@ -96,7 +96,7 @@ protected void registerUpgradeActions(String packageName) { .forPackages(packageName) .setScanners(new TypeAnnotationsScanner(), new SubTypesScanner()) .setExpandSuperTypes(false) - .useParallelExecutor()); + .setParallel(true)); Set> typesAnnotatedWith = reflections.getTypesAnnotatedWith(UpgradeActionOm.class); typesAnnotatedWith.forEach(actionClass -> { @@ -132,7 +132,7 @@ public static Set> getRequestClasses( .setUrls(ClasspathHelper.forPackage(packageName)) .setScanners(new SubTypesScanner()) .setExpandSuperTypes(false) - .useParallelExecutor()); + .setParallel(true)); Set> validRequests = new HashSet<>(); Set> subTypes = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index cf9bb4f0bbce..03729aebb509 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -57,6 +57,7 @@ import com.google.protobuf.ServiceException; import org.apache.hadoop.ozone.security.S3SecurityUtil; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.ExitUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,6 +87,9 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements OzoneManagerP // always true, only used in tests private boolean shouldFlushCache = true; + private OMRequest lastRequestToSubmit; + + /** * Constructs an instance of the server handler. 
* @@ -109,8 +113,9 @@ public OzoneManagerProtocolServerSideTranslatorPB( : OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(ozoneManager.getMetadataManager()) .enableTracing(TracingUtil.isTracingEnabled(ozoneManager.getConfiguration())) - .build(); - this.handler = new OzoneManagerRequestHandler(impl, ozoneManagerDoubleBuffer); + .build() + .start(); + this.handler = new OzoneManagerRequestHandler(impl); this.omRatisServer = ratisServer; dispatcher = new OzoneProtocolMessageDispatcher<>("OzoneProtocol", metrics, LOG, OMPBHelper::processForDebug, OMPBHelper::processForDebug); @@ -210,6 +215,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws assert (omClientRequest != null); OMClientRequest finalOmClientRequest = omClientRequest; requestToSubmit = preExecute(finalOmClientRequest); + this.lastRequestToSubmit = requestToSubmit; } catch (IOException ex) { if (omClientRequest != null) { omClientRequest.handleRequestFailure(ozoneManager); @@ -233,6 +239,11 @@ private OMRequest preExecute(OMClientRequest finalOmClientRequest) () -> finalOmClientRequest.preExecute(ozoneManager)); } + @VisibleForTesting + public OMRequest getLastRequestToSubmit() { + return lastRequestToSubmit; + } + /** * Submits request to OM's Ratis server. */ @@ -278,7 +289,7 @@ private ServiceException createLeaderNotReadyException() { * Submits request directly to OM. */ private OMResponse submitRequestDirectlyToOM(OMRequest request) { - OMClientResponse omClientResponse; + final OMClientResponse omClientResponse; try { if (OmUtils.isReadOnly(request)) { return handler.handleReadRequest(request); @@ -286,8 +297,8 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { OMClientRequest omClientRequest = createClientRequest(request, ozoneManager); request = omClientRequest.preExecute(ozoneManager); - long index = transactionIndex.incrementAndGet(); - omClientResponse = handler.handleWriteRequest(request, TransactionInfo.getTermIndex(index)); + final TermIndex termIndex = TransactionInfo.getTermIndex(transactionIndex.incrementAndGet()); + omClientResponse = handler.handleWriteRequest(request, termIndex, ozoneManagerDoubleBuffer); } } catch (IOException ex) { // As some preExecute returns error. So handle here. 
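For readers tracing the write path: after this refactor the protocol translator builds and starts its own OzoneManagerDoubleBuffer and passes it into each handleWriteRequest call together with the Ratis TermIndex, while the buffering itself moves into a default method on the handler interface (changed further below) that wraps handleWriteRequestImpl. The following is a minimal, self-contained sketch of that default-method pattern under simplified assumptions; WriteRequest, WriteResponse, DoubleBuffer and Handler are illustrative stand-ins, not the real Ozone classes.

import java.util.ArrayList;
import java.util.List;

// Illustrative stand-ins for OMRequest, OMClientResponse and the double buffer.
final class WriteRequest {
  final String cmdType;
  WriteRequest(String cmdType) { this.cmdType = cmdType; }
}

final class WriteResponse {
  final String result;
  WriteResponse(String result) { this.result = result; }
}

final class DoubleBuffer {
  final List<WriteResponse> pending = new ArrayList<>();
  // Queues a response for asynchronous flushing, keyed by its transaction index.
  void add(WriteResponse response, long termIndex) { pending.add(response); }
}

interface Handler {
  // The default method owns the buffer wiring; concrete handlers only implement
  // the request-specific logic in handleWriteRequestImpl().
  default WriteResponse handleWriteRequest(WriteRequest request, long termIndex, DoubleBuffer buffer) {
    WriteResponse response = handleWriteRequestImpl(request, termIndex);
    if (!"Prepare".equals(request.cmdType)) { // Prepare responses bypass the buffer, as in the patch.
      buffer.add(response, termIndex);
    }
    return response;
  }

  WriteResponse handleWriteRequestImpl(WriteRequest request, long termIndex);
}

public class DefaultMethodSketch {
  public static void main(String[] args) {
    Handler handler = (request, termIndex) -> new WriteResponse("applied " + request.cmdType);
    DoubleBuffer buffer = new DoubleBuffer();
    handler.handleWriteRequest(new WriteRequest("CreateKey"), 1L, buffer);
    handler.handleWriteRequest(new WriteRequest("Prepare"), 2L, buffer);
    System.out.println("Buffered responses: " + buffer.pending.size()); // 1: the Prepare response is not buffered
  }
}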
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index b97e2160f956..d0d7a8a4f63e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.UUID; import java.util.stream.Collectors; @@ -40,7 +41,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.OzonePBHelper; import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.common.PayloadUtils; +import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -69,7 +70,6 @@ import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.om.helpers.TenantUserList; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -163,7 +163,6 @@ import static org.apache.hadoop.util.MetricUtil.captureLatencyNs; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; -import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.ProtobufUtils; import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; @@ -177,13 +176,10 @@ public class OzoneManagerRequestHandler implements RequestHandler { static final Logger LOG = LoggerFactory.getLogger(OzoneManagerRequestHandler.class); private final OzoneManager impl; - private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private FaultInjector injector; - public OzoneManagerRequestHandler(OzoneManager om, - OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) { + public OzoneManagerRequestHandler(OzoneManager om) { this.impl = om; - this.ozoneManagerDoubleBuffer = ozoneManagerDoubleBuffer; } //TODO simplify it to make it shorter @@ -401,27 +397,14 @@ public OMResponse handleReadRequest(OMRequest request) { } @Override - public OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex) throws IOException { + public OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException { injectPause(); OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, impl); return captureLatencyNs( impl.getPerfMetrics().getValidateAndUpdateCacheLatencyNs(), - () -> { - OMClientResponse omClientResponse = - omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex); - Preconditions.checkNotNull(omClientResponse, - "omClientResponse returned by validateAndUpdateCache cannot be null"); - if (omRequest.getCmdType() != Type.Prepare) { - ozoneManagerDoubleBuffer.add(omClientResponse, termIndex); - } - return omClientResponse; - }); - } - - @Override - public void updateDoubleBuffer(OzoneManagerDoubleBuffer omDoubleBuffer) { - this.ozoneManagerDoubleBuffer = omDoubleBuffer; + () -> 
Objects.requireNonNull(omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex), + "omClientResponse returned by validateAndUpdateCache cannot be null")); } @VisibleForTesting @@ -1481,9 +1464,8 @@ public OzoneManager getOzoneManager() { private EchoRPCResponse echoRPC(EchoRPCRequest req) { EchoRPCResponse.Builder builder = EchoRPCResponse.newBuilder(); - byte[] payloadBytes = - PayloadUtils.generatePayloadBytes(req.getPayloadSizeResp()); - builder.setPayload(ByteString.copyFrom(payloadBytes)); + final ByteString payloadBytes = PayloadUtils.generatePayloadProto2(req.getPayloadSizeResp()); + builder.setPayload(payloadBytes); return builder.build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java index 17e9f0a7d656..e60362a1ebb3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java @@ -20,10 +20,9 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.server.protocol.TermIndex; import java.io.IOException; @@ -50,22 +49,30 @@ public interface RequestHandler { void validateRequest(OMRequest omRequest) throws OMException; /** - * Handle write requests. In HA this will be called from - * OzoneManagerStateMachine applyTransaction method. In non-HA this will be - * called from {@link OzoneManagerProtocolServerSideTranslatorPB} for write - * requests. + * Handle write requests. + * In HA this will be called from OzoneManagerStateMachine applyTransaction method. + * In non-HA this will be called from {@link OzoneManagerProtocolServerSideTranslatorPB}. * - * @param omRequest - * @param termIndex - ratis transaction log (term, index) + * @param omRequest the write request + * @param termIndex - ratis transaction term and index + * @param ozoneManagerDoubleBuffer for adding response * @return OMClientResponse */ - OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex) throws IOException; + default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex, + OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) throws IOException { + final OMClientResponse response = handleWriteRequestImpl(omRequest, termIndex); + if (omRequest.getCmdType() != Type.Prepare) { + ozoneManagerDoubleBuffer.add(response, termIndex); + } + return response; + } /** - * Update the OzoneManagerDoubleBuffer. This will be called when - * stateMachine is unpaused and set with new doublebuffer object. - * @param ozoneManagerDoubleBuffer + * Implementation of {@link #handleWriteRequest(OMRequest, TermIndex, OzoneManagerDoubleBuffer)}. 
+ * + * @param omRequest the write request + * @param termIndex - ratis transaction term and index + * @return OMClientResponse */ - void updateDoubleBuffer(OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer); - + OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index a76810e1feb4..a6fe61eb4805 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -441,10 +441,12 @@ public boolean verifySignature(OzoneTokenIdentifier identifier, signerCert = getCertClient().getCertificate( identifier.getOmCertSerialId()); } catch (CertificateException e) { + LOG.error("getCertificate failed for serialId {}", identifier.getOmCertSerialId(), e); return false; } if (signerCert == null) { + LOG.error("signerCert is null for serialId {}", identifier.getOmCertSerialId()); return false; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java index 43d29c1608a8..edffd5ed74eb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; @@ -105,12 +106,16 @@ public OmTestManagers(OzoneConfiguration conf, keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils .getInternalState(om, "keyManager"); ScmClient scmClient = new ScmClient(scmBlockClient, containerClient, conf); + ScmTopologyClient scmTopologyClient = + new ScmTopologyClient(scmBlockClient); HddsWhiteboxTestUtils.setInternalState(om, "scmClient", scmClient); HddsWhiteboxTestUtils.setInternalState(keyManager, "scmClient", scmClient); HddsWhiteboxTestUtils.setInternalState(keyManager, "secretManager", mock(OzoneBlockTokenSecretManager.class)); + HddsWhiteboxTestUtils.setInternalState(om, + "scmTopologyClient", scmTopologyClient); om.start(); waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java index 8847a2d51e3f..8ba5ca779c1e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java @@ -31,6 +31,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; import 
org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; @@ -200,6 +203,14 @@ public List sortDatanodes(List nodes, return null; } + @Override + public InnerNode getNetworkTopology() { + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + InnerNode clusterTree = factory.newInnerNode("", "", null, + NetConstants.ROOT_LEVEL, 1); + return clusterTree; + } + /** * Return the number of blocks puesdo deleted by this testing client. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java index b78864e30105..f600158007b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java @@ -18,11 +18,11 @@ package org.apache.hadoop.ozone.om; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLock; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLockImpl; import org.apache.ozone.test.GenericTestUtils; @@ -103,17 +103,14 @@ public void testStampedLockBehavior() throws InterruptedException { public void testLockInOneThreadUnlockInAnother() { final AuthorizerLock authorizerLock = new AuthorizerLockImpl(); - - try { + assertDoesNotThrow(() -> { authorizerLock.tryWriteLockInOMRequest(); // Spawn another thread to release the lock. // Works as long as they share the same AuthorizerLockImpl instance. 
final Thread thread1 = new Thread(authorizerLock::unlockWriteInOMRequest); thread1.start(); - } catch (IOException e) { - fail("Should not have thrown: " + e.getMessage()); - } + }); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java index f9c9c5ecc81e..cd08fe88d998 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java @@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.OzoneTestBase; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -337,6 +338,8 @@ public void testDeleteNonEmptyBucket() throws Exception { .setVolumeName(volume) .setBucketName("bucket-one") .setKeyName("key-one") + .setOwnerName( + UserGroupInformation.getCurrentUser().getShortUserName()) .setAcls(Collections.emptyList()) .setLocationInfoList(new ArrayList<>()) .setReplicationConfig( @@ -350,6 +353,8 @@ public void testDeleteNonEmptyBucket() throws Exception { .setVolumeName(volume) .setBucketName("bucket-one") .setKeyName("key-two") + .setOwnerName( + UserGroupInformation.getCurrentUser().getShortUserName()) .setAcls(Collections.emptyList()) .setLocationInfoList(new ArrayList<>()) .setReplicationConfig( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java new file mode 100644 index 000000000000..1be85d204903 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.ozone.om.BucketUtilizationMetrics.BucketMetricsInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.junit.jupiter.api.Test; + +import java.util.Iterator; +import java.util.Map; + +import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Test class for BucketUtilizationMetrics. + */ +public class TestBucketUtilizationMetrics { + + private static final String VOLUME_NAME_1 = "volume1"; + private static final String VOLUME_NAME_2 = "volume2"; + private static final String BUCKET_NAME_1 = "bucket1"; + private static final String BUCKET_NAME_2 = "bucket2"; + private static final long USED_BYTES_1 = 100; + private static final long USED_BYTES_2 = 200; + private static final long QUOTA_IN_BYTES_1 = 200; + private static final long QUOTA_IN_BYTES_2 = QUOTA_RESET; + private static final long QUOTA_IN_NAMESPACE_1 = 1; + private static final long QUOTA_IN_NAMESPACE_2 = 2; + + @Test + void testBucketUtilizationMetrics() { + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + + Map.Entry, CacheValue> entry1 = createMockEntry(VOLUME_NAME_1, BUCKET_NAME_1, + USED_BYTES_1, QUOTA_IN_BYTES_1, QUOTA_IN_NAMESPACE_1); + Map.Entry, CacheValue> entry2 = createMockEntry(VOLUME_NAME_2, BUCKET_NAME_2, + USED_BYTES_2, QUOTA_IN_BYTES_2, QUOTA_IN_NAMESPACE_2); + + Iterator, CacheValue>> bucketIterator = mock(Iterator.class); + when(bucketIterator.hasNext()) + .thenReturn(true) + .thenReturn(true) + .thenReturn(false); + + when(bucketIterator.next()) + .thenReturn(entry1) + .thenReturn(entry2); + + when(omMetadataManager.getBucketIterator()).thenReturn(bucketIterator); + + MetricsRecordBuilder mb = mock(MetricsRecordBuilder.class); + when(mb.setContext(anyString())).thenReturn(mb); + when(mb.tag(any(MetricsInfo.class), anyString())).thenReturn(mb); + when(mb.addGauge(any(MetricsInfo.class), anyInt())).thenReturn(mb); + when(mb.addGauge(any(MetricsInfo.class), anyLong())).thenReturn(mb); + + MetricsCollector metricsCollector = mock(MetricsCollector.class); + when(metricsCollector.addRecord(anyString())).thenReturn(mb); + + BucketUtilizationMetrics containerMetrics = new BucketUtilizationMetrics(omMetadataManager); + + containerMetrics.getMetrics(metricsCollector, true); + + verify(mb, times(1)).tag(BucketMetricsInfo.VolumeName, VOLUME_NAME_1); + verify(mb, times(1)).tag(BucketMetricsInfo.BucketName, BUCKET_NAME_1); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketUsedBytes, USED_BYTES_1); + verify(mb, 
times(1)).addGauge(BucketMetricsInfo.BucketQuotaBytes, QUOTA_IN_BYTES_1); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaNamespace, QUOTA_IN_NAMESPACE_1); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketAvailableBytes, + QUOTA_IN_BYTES_1 - USED_BYTES_1); + + verify(mb, times(1)).tag(BucketMetricsInfo.VolumeName, VOLUME_NAME_2); + verify(mb, times(1)).tag(BucketMetricsInfo.BucketName, BUCKET_NAME_2); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketUsedBytes, USED_BYTES_2); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaBytes, QUOTA_IN_BYTES_2); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaNamespace, QUOTA_IN_NAMESPACE_2); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketAvailableBytes, QUOTA_RESET); + } + + private static Map.Entry, CacheValue> createMockEntry(String volumeName, + String bucketName, long usedBytes, long quotaInBytes, long quotaInNamespace) { + Map.Entry, CacheValue> entry = mock(Map.Entry.class); + CacheValue cacheValue = mock(CacheValue.class); + OmBucketInfo bucketInfo = mock(OmBucketInfo.class); + + when(bucketInfo.getVolumeName()).thenReturn(volumeName); + when(bucketInfo.getBucketName()).thenReturn(bucketName); + when(bucketInfo.getUsedBytes()).thenReturn(usedBytes); + when(bucketInfo.getQuotaInBytes()).thenReturn(quotaInBytes); + when(bucketInfo.getQuotaInNamespace()).thenReturn(quotaInNamespace); + + when(cacheValue.getCacheValue()).thenReturn(bucketInfo); + + when(entry.getValue()).thenReturn(cacheValue); + + return entry; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java index df7f5b67b4e9..33a33ad807d4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.ozone.client.io.KeyInputStream; import jakarta.annotation.Nonnull; @@ -34,6 +36,8 @@ */ public class TestChunkStreams { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testReadGroupInputStream() throws Exception { String dataString = RandomStringUtils.randomAscii(500); @@ -90,7 +94,10 @@ private List createInputStreams(String dataString) { } private BlockInputStream createStream(byte[] buf, int offset) { - return new BlockInputStream(null, 100L, null, null, true, null) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new BlockInputStream(null, 100L, null, null, null, + clientConfig) { private long pos; private final ByteArrayInputStream in = new ByteArrayInputStream(buf, offset, 100); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 6454a77d66f3..3b04e6a7bd5a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -23,12 +23,10 @@ import 
java.nio.file.Path; import java.time.Instant; import java.util.ArrayList; -import java.util.HashMap; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; @@ -44,6 +42,9 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -65,9 +66,11 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; import org.apache.ozone.test.OzoneTestBase; @@ -78,14 +81,9 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import static com.google.common.collect.Sets.newHashSet; -import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; -import static java.util.Comparator.comparing; -import static java.util.stream.Collectors.toList; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -123,6 +121,9 @@ void setup(@TempDir Path testDir) throws Exception { configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.toString()); containerClient = mock(StorageContainerLocationProtocol.class); blockClient = mock(ScmBlockLocationProtocol.class); + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + when(blockClient.getNetworkTopology()).thenReturn( + factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1)); OmTestManagers omTestManagers = new OmTestManagers(configuration, blockClient, containerClient); @@ -161,6 +162,61 @@ public void listMultipartUploadPartsWithZeroUpload() throws IOException { omMultipartUploadListParts.getPartInfoList().size()); } + @Test + public void listMultipartUploadPartsWithoutEtagField() throws IOException { + // For backward compatibility reasons + final String volume = volumeName(); + final String bucket = "bucketForEtag"; + final String key = "dir/key1"; + createBucket(metadataManager, volume, bucket); + OmMultipartInfo omMultipartInfo = + initMultipartUpload(writeClient, volume, bucket, key); + + + // Commit some MPU parts without eTag field + for (int i = 1; i <= 5; i++) { + OmKeyArgs partKeyArgs = + new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(key) + .setIsMultipartKey(true) + .setMultipartUploadID(omMultipartInfo.getUploadID()) + .setMultipartUploadPartNumber(i) + 
.setAcls(Collections.emptyList()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) + .build(); + + OpenKeySession openKey = writeClient.openKey(partKeyArgs); + + OmKeyArgs commitPartKeyArgs = + new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(key) + .setIsMultipartKey(true) + .setMultipartUploadID(omMultipartInfo.getUploadID()) + .setMultipartUploadPartNumber(i) + .setAcls(Collections.emptyList()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .setLocationInfoList(Collections.emptyList()) + .build(); + + writeClient.commitMultipartUploadPart(commitPartKeyArgs, openKey.getId()); + } + + + OmMultipartUploadListParts omMultipartUploadListParts = keyManager + .listParts(volume, bucket, key, omMultipartInfo.getUploadID(), + 0, 10); + assertEquals(5, + omMultipartUploadListParts.getPartInfoList().size()); + + } + private String volumeName() { return getTestName(); } @@ -331,6 +387,7 @@ private OmMultipartInfo initMultipartUpload(OzoneManagerProtocol omtest, .setReplicationConfig( RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) .setAcls(new ArrayList<>()) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); OmMultipartInfo omMultipartInfo = omtest.initiateMultipartUpload(key1); return omMultipartInfo; @@ -589,9 +646,6 @@ public void listStatus() throws Exception { OMRequestTestUtils.addBucketToDB(volume, bucket, metadataManager); final Pipeline pipeline = MockPipeline.createPipeline(3); - final List nodes = pipeline.getNodes().stream() - .map(DatanodeDetails::getUuidString) - .collect(toList()); Set containerIDs = new HashSet<>(); List containersWithPipeline = new ArrayList<>(); @@ -641,7 +695,6 @@ public void listStatus() throws Exception { assertEquals(10, fileStatusList.size()); verify(containerClient).getContainerWithPipelineBatch(containerIDs); - verify(blockClient).sortDatanodes(nodes, client); // call list status the second time, and verify no more calls to // SCM. 
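The TestKeyManagerUnit changes in the surrounding hunks stop verifying per-read sortDatanodes calls and instead stub the SCM block client's getNetworkTopology() with a one-level cluster tree. A minimal sketch of that stubbing pattern in isolation is below; it assumes the Mockito and InnerNode factory calls shown in the hunks above, and the class and method names here are illustrative only.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hdds.scm.net.InnerNode;
import org.apache.hadoop.hdds.scm.net.InnerNodeImpl;
import org.apache.hadoop.hdds.scm.net.NetConstants;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

public final class NetworkTopologyStubSketch {

  private NetworkTopologyStubSketch() { }

  // Builds a mocked block client whose getNetworkTopology() returns a
  // single-level cluster tree, mirroring the stub added in this test class.
  public static ScmBlockLocationProtocol stubbedBlockClient() throws Exception {
    ScmBlockLocationProtocol blockClient = mock(ScmBlockLocationProtocol.class);
    InnerNode.Factory factory = InnerNodeImpl.FACTORY;
    InnerNode clusterTree = factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1);
    when(blockClient.getNetworkTopology()).thenReturn(clusterTree);
    return blockClient;
  }
}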
@@ -649,67 +702,4 @@ public void listStatus() throws Exception { null, Long.MAX_VALUE, client); verify(containerClient, times(1)).getContainerWithPipelineBatch(anySet()); } - - @ParameterizedTest - @ValueSource(strings = {"anyhost", ""}) - public void sortDatanodes(String client) throws Exception { - // GIVEN - int pipelineCount = 3; - int keysPerPipeline = 5; - OmKeyInfo[] keyInfos = new OmKeyInfo[pipelineCount * keysPerPipeline]; - List> expectedSortDatanodesInvocations = new ArrayList<>(); - Map> expectedSortedNodes = new HashMap<>(); - int ki = 0; - for (int p = 0; p < pipelineCount; p++) { - final Pipeline pipeline = MockPipeline.createPipeline(3); - final List nodes = pipeline.getNodes().stream() - .map(DatanodeDetails::getUuidString) - .collect(toList()); - expectedSortDatanodesInvocations.add(nodes); - final List sortedNodes = pipeline.getNodes().stream() - .sorted(comparing(DatanodeDetails::getUuidString)) - .collect(toList()); - expectedSortedNodes.put(pipeline, sortedNodes); - - when(blockClient.sortDatanodes(nodes, client)) - .thenReturn(sortedNodes); - - for (int i = 1; i <= keysPerPipeline; i++) { - OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(i, 1L)) - .setPipeline(pipeline) - .setOffset(0) - .setLength(256000) - .build(); - - OmKeyInfo keyInfo = new OmKeyInfo.Builder() - .setOmKeyLocationInfos(Arrays.asList( - new OmKeyLocationInfoGroup(0, emptyList()), - new OmKeyLocationInfoGroup(1, singletonList(keyLocationInfo)))) - .build(); - keyInfos[ki++] = keyInfo; - } - } - - // WHEN - keyManager.sortDatanodes(client, keyInfos); - - // THEN - // verify all key info locations got updated - for (OmKeyInfo keyInfo : keyInfos) { - OmKeyLocationInfoGroup locations = keyInfo.getLatestVersionLocations(); - assertNotNull(locations); - for (OmKeyLocationInfo locationInfo : locations.getLocationList()) { - Pipeline pipeline = locationInfo.getPipeline(); - List expectedOrder = expectedSortedNodes.get(pipeline); - assertEquals(expectedOrder, pipeline.getNodesInOrder()); - } - } - - // expect one invocation per pipeline - for (List nodes : expectedSortDatanodesInvocations) { - verify(blockClient).sortDatanodes(nodes, client); - } - } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java index 0079585a85b6..a4ced424522b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java @@ -156,7 +156,7 @@ public void testMultiTenancyRequestsWhenDisabled() throws IOException { // Check that Multi-Tenancy read requests are blocked when not enabled final OzoneManagerRequestHandler ozoneManagerRequestHandler = - new OzoneManagerRequestHandler(ozoneManager, null); + new OzoneManagerRequestHandler(ozoneManager); expectReadRequestToFail(ozoneManagerRequestHandler, OMRequestTestUtils.listUsersInTenantRequest(tenantId)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 7d66ba66578b..c4913879ae90 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -27,6 +28,7 @@ import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.TransactionInfo; @@ -65,6 +67,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD; @@ -589,9 +592,9 @@ public void testListOpenFiles(BucketLayout bucketLayout) throws Exception { int numOpenKeys = 3; List openKeys = new ArrayList<>(); for (int i = 0; i < numOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyPrefix + i, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, 0L, Time.now()); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyPrefix + i, + RatisReplicationConfig.getInstance(ONE)) + .build(); final String dbOpenKeyName; if (bucketLayout.isFileSystemOptimized()) { @@ -707,9 +710,10 @@ public void testGetExpiredOpenKeys(BucketLayout bucketLayout) for (int i = 0; i < numExpiredOpenKeys + numUnexpiredOpenKeys; i++) { final long creationTime = i < numExpiredOpenKeys ? expiredOpenKeyCreationTime : Time.now(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, 0L, creationTime); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( + volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(creationTime) + .build(); final String dbOpenKeyName; if (bucketLayout.isFileSystemOptimized()) { @@ -779,10 +783,11 @@ public void testGetExpiredOpenKeysExcludeMPUKeys( // Ensure that "expired" MPU-related open keys are not fetched. // MPU-related open keys, identified by isMultipartKey = false for (int i = 0; i < numExpiredMPUOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, expiredOpenKeyCreationTime, true); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setCreationTime(expiredOpenKeyCreationTime) + .build(); + assertThat(keyInfo.getModificationTime()).isPositive(); final String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. 
@@ -812,10 +817,10 @@ public void testGetExpiredOpenKeysExcludeMPUKeys( // HDDS-9017. Although these open keys are MPU-related, // the isMultipartKey flags are set to false for (int i = numExpiredMPUOpenKeys; i < 2 * numExpiredMPUOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, expiredOpenKeyCreationTime, false); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( + volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(expiredOpenKeyCreationTime) + .build(); final String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. @@ -879,8 +884,9 @@ public void testGetExpiredMPUs() throws Exception { String keyName = "expired" + i; // Key info to construct the MPU DB key final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, 0L, creationTime); + bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(creationTime) + .build(); for (int j = 1; j <= numPartsPerMPU; j++) { @@ -952,11 +958,10 @@ private void addKeysToOM(String volumeName, String bucketName, if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - 1000L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); } else { OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + RatisReplicationConfig.getInstance(ONE), omMetadataManager); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index e1ae8f57d15e..c865cb7814de 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -66,7 +66,6 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.truncateFileName; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -167,18 +166,25 @@ public void testCloseOnEviction() throws IOException { SnapshotInfo first = createSnapshotInfo(volumeName, bucketName); SnapshotInfo second = createSnapshotInfo(volumeName, bucketName); + first.setGlobalPreviousSnapshotId(null); + first.setPathPreviousSnapshotId(null); + second.setGlobalPreviousSnapshotId(first.getSnapshotId()); + second.setPathPreviousSnapshotId(first.getSnapshotId()); + when(snapshotInfoTable.get(first.getTableKey())).thenReturn(first); when(snapshotInfoTable.get(second.getTableKey())).thenReturn(second); + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager().addSnapshot(first); + ((OmMetadataManagerImpl) 
om.getMetadataManager()).getSnapshotChainManager().addSnapshot(second); // create the first snapshot checkpoint OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(), first); // retrieve it and setup store mock OmSnapshotManager omSnapshotManager = om.getOmSnapshotManager(); - OmSnapshot firstSnapshot = (OmSnapshot) omSnapshotManager - .checkForSnapshot(first.getVolumeName(), - first.getBucketName(), getSnapshotPrefix(first.getName()), false).get(); + OmSnapshot firstSnapshot = omSnapshotManager + .getActiveSnapshot(first.getVolumeName(), first.getBucketName(), first.getName()) + .get(); DBStore firstSnapshotStore = mock(DBStore.class); HddsWhiteboxTestUtils.setInternalState( firstSnapshot.getMetadataManager(), "store", firstSnapshotStore); @@ -192,13 +198,12 @@ public void testCloseOnEviction() throws IOException { // read in second snapshot to evict first omSnapshotManager - .checkForSnapshot(second.getVolumeName(), - second.getBucketName(), getSnapshotPrefix(second.getName()), false); + .getActiveSnapshot(second.getVolumeName(), second.getBucketName(), second.getName()); // As a workaround, invalidate all cache entries in order to trigger // instances close in this test case, since JVM GC most likely would not // have triggered and closed the instances yet at this point. - omSnapshotManager.getSnapshotCache().invalidateAll(); + omSnapshotManager.invalidateCache(); // confirm store was closed verify(firstSnapshotStore, timeout(3000).times(1)).close(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java index b7c83956085f..4f0c15f15e53 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -124,6 +125,7 @@ private void createAndDeleteKey(String keyName) throws IOException { .setLocationInfoList(new ArrayList<>()) .setReplicationConfig(StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); /* Create and delete key in the Key Manager. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 1890958cbaad..125c9efcaf2d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.om.S3SecretManagerImpl; import org.apache.hadoop.ozone.om.S3SecretCache; import org.apache.hadoop.ozone.om.S3SecretLockedManager; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; @@ -137,7 +136,8 @@ public void setup() throws IOException { .setMaxUnFlushedTransactionCount(1000) .enableRatis(true) .setFlushNotifier(spyFlushNotifier) - .build(); + .build() + .start(); doNothing().when(omKeyCreateResponse).checkAndUpdateDB(any(), any()); doNothing().when(omBucketCreateResponse).checkAndUpdateDB(any(), any()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index dd8e642721e6..22272182997e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -37,7 +37,6 @@ .CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -79,7 +78,8 @@ public void setup() throws IOException { .setOmMetadataManager(omMetadataManager) .setMaxUnFlushedTransactionCount(10000) .enableRatis(true) - .build(); + .build() + .start(); } @AfterEach diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index a97b24289cd7..54b04260d556 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -64,6 +64,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -111,7 +112,8 @@ public void setup() throws IOException 
{ .setOmMetadataManager(omMetadataManager) .setMaxUnFlushedTransactionCount(100000) .enableRatis(true) - .build(); + .build() + .start(); } @AfterEach @@ -416,13 +418,9 @@ private void testDoubleBuffer(int volumeCount, int bucketsPerVolume) } private boolean assertRowCount(int expected, Table table) { - long count = 0L; - try { - count = omMetadataManager.countRowsInTable(table); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expected; + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> count.set(omMetadataManager.countRowsInTable(table))); + return count.get() == expected; } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java index 93997826bf33..9d8c13def4bd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java @@ -37,7 +37,6 @@ import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.server.raftlog.RaftLog; import org.apache.ratis.statemachine.TransactionContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -101,12 +100,12 @@ static void assertTermIndex(long expectedTerm, long expectedIndex, TermIndex com @Test public void testLastAppliedIndex() { ozoneManagerStateMachine.notifyTermIndexUpdated(0, 0); - assertTermIndex(0, RaftLog.INVALID_LOG_INDEX, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(0, 0, ozoneManagerStateMachine.getLastAppliedTermIndex()); assertTermIndex(0, 0, ozoneManagerStateMachine.getLastNotifiedTermIndex()); // Conf/metadata transaction. ozoneManagerStateMachine.notifyTermIndexUpdated(0, 1); - assertTermIndex(0, RaftLog.INVALID_LOG_INDEX, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(0, 1, ozoneManagerStateMachine.getLastAppliedTermIndex()); assertTermIndex(0, 1, ozoneManagerStateMachine.getLastNotifiedTermIndex()); // call update last applied index @@ -119,7 +118,7 @@ public void testLastAppliedIndex() { // Conf/metadata transaction. ozoneManagerStateMachine.notifyTermIndexUpdated(1L, 4L); - assertTermIndex(0, 3, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(1, 4, ozoneManagerStateMachine.getLastAppliedTermIndex()); assertTermIndex(1, 4, ozoneManagerStateMachine.getLastNotifiedTermIndex()); // Add some apply transactions. 
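The rewritten `assertRowCount` above replaces a manual try/catch-and-fail with JUnit 5's `assertDoesNotThrow`, capturing the row count in an `AtomicLong` because the lambda cannot assign to a local variable. A minimal, self-contained illustration of the same pattern; the `countRows` helper here is hypothetical and only stands in for `OMMetadataManager#countRowsInTable`:

    import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;
    import org.junit.jupiter.api.Test;

    class RowCountAssertionExample {

      // Hypothetical stand-in for OMMetadataManager#countRowsInTable, which declares IOException.
      private long countRows() throws IOException {
        return 42L;
      }

      private boolean assertRowCount(int expected) {
        AtomicLong count = new AtomicLong(0L);
        // The checked IOException becomes a test failure instead of being swallowed.
        assertDoesNotThrow(() -> count.set(countRows()));
        return count.get() == expected;
      }

      @Test
      void rowCountMatches() {
        assertTrue(assertRowCount(42));
      }
    }

JUnit 5 also offers `assertDoesNotThrow(ThrowingSupplier<T>)`, which returns the supplier's value directly and would avoid the `AtomicLong` altogether.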
@@ -130,6 +129,26 @@ public void testLastAppliedIndex() { assertTermIndex(1, 4, ozoneManagerStateMachine.getLastNotifiedTermIndex()); } + @Test + public void testNotifyTermIndexPendingBufferUpdateIndex() { + ozoneManagerStateMachine.notifyTermIndexUpdated(0, 0); + assertTermIndex(0, 0, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(0, 0, ozoneManagerStateMachine.getLastNotifiedTermIndex()); + + // notifyTermIndex with skipping one of transaction which is from applyTransaction + ozoneManagerStateMachine.notifyTermIndexUpdated(0, 2); + ozoneManagerStateMachine.notifyTermIndexUpdated(0, 3); + assertTermIndex(0, 0, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(0, 3, ozoneManagerStateMachine.getLastNotifiedTermIndex()); + + // applyTransaction update with missing transaction as above + ozoneManagerStateMachine.updateLastAppliedTermIndex(TermIndex.valueOf(0, 1)); + assertTermIndex(0, 3, ozoneManagerStateMachine.getLastAppliedTermIndex()); + + assertTermIndex(0, 3, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(0, 3, ozoneManagerStateMachine.getLastNotifiedTermIndex()); + } + @Test public void testPreAppendTransaction() throws Exception { // Submit write request. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 163aefc7d3a2..c807c04688d6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -19,12 +19,20 @@ package org.apache.hadoop.ozone.om.request; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.security.DigestInputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.UUID; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -42,6 +50,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -157,23 +166,22 @@ public static void addVolumeAndBucketToDB( @SuppressWarnings("parameterNumber") public static void addKeyToTableAndCache(String volumeName, String bucketName, - String keyName, long clientID, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + String keyName, long clientID, ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(false, true, volumeName, bucketName, keyName, clientID, - replicationType, replicationFactor, trxnLogIndex, omMetadataManager); + replicationConfig, trxnLogIndex, omMetadataManager); } /** * Add key entry to KeyTable. 
if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. + * * @param openKeyTable * @param volumeName * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @param locationList * @throws Exception @@ -181,12 +189,11 @@ public static void addKeyToTableAndCache(String volumeName, String bucketName, @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager, List locationList, long version) throws Exception { addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 0L, omMetadataManager, + clientID, replicationConfig, 0L, omMetadataManager, locationList, version); } @@ -194,24 +201,23 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. + * * @param openKeyTable * @param volumeName * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @throws Exception */ @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 0L, omMetadataManager); + clientID, replicationConfig, 0L, omMetadataManager); } /** @@ -225,20 +231,17 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @throws Exception */ @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(openKeyTable, isMultipartKey, false, - volumeName, bucketName, keyName, clientID, replicationType, - replicationFactor, 0L, omMetadataManager); + volumeName, bucketName, keyName, clientID, replicationConfig, 0L, omMetadataManager); } /** @@ -248,19 +251,20 @@ public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, */ @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean addToCache, - String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + String volumeName, String bucketName, String keyName, long clientID, ReplicationConfig replicationConfig, + long trxnLogIndex, OMMetadataManager omMetadataManager, List 
locationList, long version) throws Exception { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex, Time.now(), version, - false); + replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setObjectID(trxnLogIndex) + .build(); + omKeyInfo.appendNewBlocks(locationList, false); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, - omMetadataManager); + omMetadataManager); } /** @@ -271,12 +275,11 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean addToCache, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { - OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex); + OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig) + .setObjectID(trxnLogIndex).build(); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -290,13 +293,13 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, boolean addToCache, String volumeName, String bucketName, String keyName, - long clientID, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + long clientID, ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex, Time.now(), 0L, - isMultipartKey); + replicationConfig, new OmKeyLocationInfoGroup(0, new ArrayList<>(), isMultipartKey)) + .setObjectID(trxnLogIndex) + .build(); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -431,23 +434,22 @@ public static void addPart(PartKeyInfo partKeyInfo, /** * Add key entry to key table cache. + * * @param volumeName * @param bucketName * @param keyName - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager */ @SuppressWarnings("parameterNumber") public static void addKeyToTableCache(String volumeName, String bucketName, String keyName, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor); + replicationConfig).build(); omMetadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry( new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, @@ -543,87 +545,43 @@ public static void addSnapshotToTable( /** * Create OmKeyInfo. + * Initializes most values to a sensible default. 
*/ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, 0L); + public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, + String keyName, ReplicationConfig replicationConfig, OmKeyLocationInfoGroup omKeyLocationInfoGroup) { + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setFileName(OzoneFSUtils.getFileName(keyName)) + .setReplicationConfig(replicationConfig) + .setObjectID(0L) + .setUpdateID(0L) + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .addOmKeyLocationInfoGroup(omKeyLocationInfoGroup) + .setDataSize(1000L); + } + + public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, + String keyName, ReplicationConfig replicationConfig) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig, + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), false)); } /** * Create OmDirectoryInfo. */ public static OmDirectoryInfo createOmDirectoryInfo(String keyName, - long objectID, - long parentObjID) { + long objectID, + long parentObjID) { return new OmDirectoryInfo.Builder() - .setName(keyName) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setObjectID(objectID) - .setParentObjectID(parentObjID) - .setUpdateID(50) - .build(); - } - - /** - * Create OmKeyInfo. - */ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, Time.now()); - } - - /** - * Create OmKeyInfo. - */ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, creationTime, 0L, false); - } - - /** - * Create OmKeyInfo. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime, boolean isMultipartKey) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, creationTime, 0L, isMultipartKey); - } - - /** - * Create OmKeyInfo for LEGACY/OBS bucket. 
- */ - @SuppressWarnings("parameterNumber") - private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime, long version, boolean isMultipartKey) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setFileName(OzoneFSUtils.getFileName(keyName)) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(version, new ArrayList<>(), - isMultipartKey))) - .setCreationTime(creationTime) + .setName(keyName) + .setCreationTime(Time.now()) .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationConfig( - ReplicationConfig - .fromProtoTypeAndFactor(replicationType, replicationFactor)) .setObjectID(objectID) - .setUpdateID(objectID) + .setParentObjectID(parentObjID) + .setUpdateID(50) .build(); } @@ -631,8 +589,8 @@ private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, * Create OmMultipartKeyInfo for OBS/LEGACY bucket. */ public static OmMultipartKeyInfo createOmMultipartKeyInfo(String uploadId, - long creationTime, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID) { + long creationTime, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID) { return new OmMultipartKeyInfo.Builder() .setUploadID(uploadId) .setCreationTime(creationTime) @@ -1035,11 +993,31 @@ public static String deleteKey(String ozoneKey, */ public static OMRequest createInitiateMPURequest(String volumeName, String bucketName, String keyName) { + return createInitiateMPURequest(volumeName, bucketName, keyName, Collections.emptyMap(), + Collections.emptyMap()); + } + + /** + * Create OMRequest which encapsulates InitiateMultipartUpload request. + * @param volumeName + * @param bucketName + * @param keyName + * @param metadata + */ + public static OMRequest createInitiateMPURequest(String volumeName, + String bucketName, String keyName, Map metadata, + Map tags) { MultipartInfoInitiateRequest multipartInfoInitiateRequest = MultipartInfoInitiateRequest.newBuilder().setKeyArgs( - KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName) - .setBucketName(bucketName)).build(); + KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setKeyName(keyName) + .setBucketName(bucketName) + .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllTags(KeyValueUtil.toProtobuf(tags)) + ) + .build(); return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload) @@ -1057,14 +1035,31 @@ public static OMRequest createCommitPartMPURequest(String volumeName, String bucketName, String keyName, long clientID, long size, String multipartUploadID, int partNumber) { + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + // Just set dummy size. 
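`createCommitPartMPURequest` now attaches a dummy `ETAG` metadata entry, hex-encoding an MD5 digest with `DigestInputStream` and `DatatypeConverter.printHexBinary` (available on the project's Java 8 toolchain via `javax.xml.bind`). A `DigestInputStream` only updates its digest as bytes are read through it, which the dummy value in the test does not rely on; a standalone sketch that digests the full payload looks like this:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import javax.xml.bind.DatatypeConverter;

    public final class ETagExample {

      // Computes the hex-encoded MD5 of the payload by streaming it through a DigestInputStream.
      static String md5Hex(byte[] payload) throws IOException, NoSuchAlgorithmException {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        try (InputStream in = new DigestInputStream(new ByteArrayInputStream(payload), md5)) {
          byte[] buffer = new byte[4096];
          while (in.read(buffer) != -1) {
            // reading is what feeds the digest; nothing else to do here
          }
        }
        return DatatypeConverter.printHexBinary(md5.digest());
      }

      public static void main(String[] args) throws Exception {
        System.out.println(md5Hex("part-1-payload".getBytes(StandardCharsets.UTF_8)));
      }
    }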
- KeyArgs.Builder keyArgs = - KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName) - .setBucketName(bucketName) - .setDataSize(size) - .setMultipartNumber(partNumber) - .setMultipartUploadID(multipartUploadID) - .addAllKeyLocations(new ArrayList<>()); + KeyArgs.Builder keyArgs = KeyArgs.newBuilder().setVolumeName(volumeName) + .setKeyName(keyName) + .setBucketName(bucketName) + .setDataSize(size) + .setMultipartNumber(partNumber) + .setMultipartUploadID(multipartUploadID) + .addAllKeyLocations(new ArrayList<>()) + .addMetadata(HddsProtos.KeyValue.newBuilder() + .setKey(OzoneConsts.ETAG) + .setValue(DatatypeConverter.printHexBinary( + new DigestInputStream( + new ByteArrayInputStream( + RandomStringUtils.randomAlphanumeric((int) size) + .getBytes(StandardCharsets.UTF_8)), + eTagProvider) + .getMessageDigest().digest())) + .build()); // Just adding dummy list. As this is for UT only. MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = @@ -1326,6 +1321,41 @@ public static OMRequest createSnapshotRequest(String volumeName, .build(); } + /** + * Create OMRequest for Rename Snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + */ + public static OMRequest renameSnapshotRequest(String volumeName, + String bucketName, + String snapshotOldName, + String snapshotNewName) { + OzoneManagerProtocolProtos.RenameSnapshotRequest renameSnapshotRequest = + OzoneManagerProtocolProtos.RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotOldName(snapshotOldName) + .setSnapshotNewName(snapshotNewName) + .build(); + + OzoneManagerProtocolProtos.UserInfo userInfo = + OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName("user") + .setHostName("host") + .setRemoteAddress("remote-address") + .build(); + + return OMRequest.newBuilder() + .setRenameSnapshotRequest(renameSnapshotRequest) + .setCmdType(Type.RenameSnapshot) + .setClientId(UUID.randomUUID().toString()) + .setUserInfo(userInfo) + .build(); + } + /** * Create OMRequest for Delete Snapshot. * @param volumeName vol to be used @@ -1408,76 +1438,6 @@ public static void addVolumeToOM(OMMetadataManager omMetadataManager, CacheValue.get(1L, omVolumeArgs)); } - /** - * Create OmKeyInfo. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime) { - return createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, objectID, - parentID, trxnLogIndex, creationTime, 0L, false); - } - - /** - * Create OmKeyInfo with isMultipartKey flag. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, - boolean isMultipartKey) { - return createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, objectID, - parentID, trxnLogIndex, creationTime, 0L, isMultipartKey); - } - - /** - * Create OmKeyInfo. 
- */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, long version) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, parentID, trxnLogIndex, creationTime, - version, false); - } - - /** - * Create OmKeyInfo for FSO bucket. - */ - @SuppressWarnings("parameterNumber") - private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, long version, - boolean isMultipartKey) { - String fileName = OzoneFSUtils.getFileName(keyName); - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(version, new ArrayList<>(), - isMultipartKey))) - .setCreationTime(creationTime) - .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationConfig(ReplicationConfig - .fromProtoTypeAndFactor(replicationType, replicationFactor)) - .setObjectID(objectID) - .setUpdateID(trxnLogIndex) - .setParentObjectID(parentID) - .setFileName(fileName) - .build(); - } - - /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java index d44b16808aff..00b94824bbf7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java @@ -25,8 +25,7 @@ import java.util.UUID; import io.grpc.Context; -import mockit.Mock; -import mockit.MockUp; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; @@ -37,7 +36,6 @@ import org.mockito.MockedStatic; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -89,54 +87,46 @@ public void setup() throws Exception { @Test public void testUserInfoInCaseOfHadoopTransport() throws Exception { - new MockUp() { - @Mock - public UserGroupInformation getRemoteUser() { - return userGroupInformation; - } - - @Mock - public InetAddress getRemoteIp() { - return inetAddress; - } - - public InetAddress getRemoteAddress() { - return inetAddress; - } - }; - - String bucketName = UUID.randomUUID().toString(); - String volumeName = UUID.randomUUID().toString(); - BucketInfo.Builder bucketInfo = - newBucketInfoBuilder(bucketName, volumeName) - .setIsVersionEnabled(true) - .setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.DISK); - OMRequest omRequest = newCreateBucketRequest(bucketInfo).build(); - - 
OMBucketCreateRequest omBucketCreateRequest = - new OMBucketCreateRequest(omRequest); - - assertFalse(omRequest.hasUserInfo()); - - OMRequest modifiedRequest = - omBucketCreateRequest.preExecute(ozoneManager); - - assertTrue(modifiedRequest.hasUserInfo()); - - // Now pass modified request to OMBucketCreateRequest and check ugi and - // remote Address. - omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - - InetAddress remoteAddress = omBucketCreateRequest.getRemoteAddress(); - UserGroupInformation ugi = omBucketCreateRequest.createUGI(); - String hostName = omBucketCreateRequest.getHostName(); - - - // Now check we have original user info, remote address and hostname or not. - // Here from OMRequest user info, converted to UGI, InetAddress and String. - assertEquals(inetAddress.getHostAddress(), remoteAddress.getHostAddress()); - assertEquals(userGroupInformation.getUserName(), ugi.getUserName()); - assertEquals(inetAddress.getHostName(), hostName); + try (MockedStatic mockedRpcServer = + mockStatic(Server.class)) { + + mockedRpcServer.when(Server::getRemoteUser).thenReturn(userGroupInformation); + mockedRpcServer.when(Server::getRemoteIp).thenReturn(inetAddress); + mockedRpcServer.when(Server::getRemoteAddress).thenReturn(inetAddress.toString()); + + String bucketName = UUID.randomUUID().toString(); + String volumeName = UUID.randomUUID().toString(); + BucketInfo.Builder bucketInfo = + newBucketInfoBuilder(bucketName, volumeName) + .setIsVersionEnabled(true) + .setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.DISK); + OMRequest omRequest = newCreateBucketRequest(bucketInfo).build(); + + OMBucketCreateRequest omBucketCreateRequest = + new OMBucketCreateRequest(omRequest); + + assertFalse(omRequest.hasUserInfo()); + + OMRequest modifiedRequest = + omBucketCreateRequest.preExecute(ozoneManager); + + assertTrue(modifiedRequest.hasUserInfo()); + + // Now pass modified request to OMBucketCreateRequest and check ugi and + // remote Address. + omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); + + InetAddress remoteAddress = omBucketCreateRequest.getRemoteAddress(); + UserGroupInformation ugi = omBucketCreateRequest.createUGI(); + String hostName = omBucketCreateRequest.getHostName(); + + + // Now check we have original user info, remote address and hostname or not. + // Here from OMRequest user info, converted to UGI, InetAddress and String. 
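The JMockit `MockUp` of Hadoop's RPC layer is replaced with Mockito's `mockStatic`, which scopes the static stubbing to a try-with-resources block so it cannot leak into other tests; it needs the inline mock maker (mockito-inline, or Mockito 5 where it is the default). A self-contained sketch of the pattern; the `RpcContext` class here is hypothetical and merely stands in for `org.apache.hadoop.ipc.Server`:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.mockito.Mockito.mockStatic;

    import org.junit.jupiter.api.Test;
    import org.mockito.MockedStatic;

    class StaticMockingExample {

      // Hypothetical stand-in for org.apache.hadoop.ipc.Server and its static accessors.
      static class RpcContext {
        static String getRemoteUser() {
          return "real-user";
        }
      }

      @Test
      void remoteUserIsStubbedOnlyInsideTheBlock() {
        try (MockedStatic<RpcContext> mocked = mockStatic(RpcContext.class)) {
          mocked.when(RpcContext::getRemoteUser).thenReturn("test-user");
          assertEquals("test-user", RpcContext.getRemoteUser());
        }
        // Outside the try-with-resources block the original behaviour is restored.
        assertEquals("real-user", RpcContext.getRemoteUser());
      }
    }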
+ assertEquals(inetAddress.getHostAddress(), remoteAddress.getHostAddress()); + assertEquals(userGroupInformation.getUserName(), ugi.getUserName()); + assertEquals(inetAddress.getHostName(), hostName); + } } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index 34f348a688dc..fdc13e369c08 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -19,16 +19,21 @@ package org.apache.hadoop.ozone.om.request.bucket; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; + +import java.util.ArrayList; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Test; @@ -119,12 +124,10 @@ public void testBucketContainsIncompleteMPUs() throws Exception { new OMBucketDeleteRequest(omRequest); // Create a MPU key in the MPU table to simulate incomplete MPU - long creationTime = Time.now(); String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, UUID.randomUUID().toString(), - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, creationTime, true); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, UUID.randomUUID().toString(), + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .build(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. 
createOmMultipartKeyInfo(uploadId, Time.now(), HddsProtos.ReplicationType.RATIS, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 275e8a6f2aae..7af60c18d94a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -27,7 +27,7 @@ import java.util.stream.Collectors; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -60,6 +60,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -297,8 +298,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName.substring(0, 12), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); OMDirectoryCreateRequest omDirectoryCreateRequest = @@ -340,7 +340,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); @@ -383,8 +383,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { omMetadataManager); // Add a key with first two levels. 
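The churn in these hunks is the signature change on the `OMRequestTestUtils.addKeyToTable` family: replication is now passed as a single `ReplicationConfig` instead of the proto `ReplicationType`/`ReplicationFactor` pair. A sketch of the call-site change, with `ONE` statically imported from `HddsProtos.ReplicationFactor` as in the tests:

    // Before: replication passed as two proto enums.
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.ONE, omMetadataManager);

    // After: replication passed as one ReplicationConfig value.
    OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName,
        keyName.substring(0, 12), 1L, RatisReplicationConfig.getInstance(ONE),
        omMetadataManager);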
OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName.substring(0, 11), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); OMDirectoryCreateRequest omDirectoryCreateRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index 0eceb2246ee2..e0460ba81a99 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.om.request.file; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; @@ -59,6 +59,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -422,8 +423,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { // Add a file into the FileTable, this is to simulate "file exists" check. OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, objID++); + bucketName, keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objID++).build(); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omBucketInfo.getObjectID(); @@ -492,21 +492,22 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath() // for index=0, parentID is bucketID OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo( - dirs.get(0), objID++, parentID); + dirs.get(0), objID++, parentID); OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, - volumeName, bucketName, txnID, omMetadataManager); + volumeName, bucketName, txnID, omMetadataManager); parentID = omDirInfo.getObjectID(); // Add a key in second level. 
- OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, objID); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(THREE)) + .setObjectID(objID) + .build(); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omBucketInfo.getObjectID(); final String ozoneKey = omMetadataManager.getOzonePathKey( - volumeId, bucketId, parentID, dirs.get(1)); + volumeId, bucketId, parentID, dirs.get(1)); ++txnID; omMetadataManager.getKeyTable(getBucketLayout()) .addCacheEntry(new CacheKey<>(ozoneKey), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index b39068fd7341..20da9d3e5dcc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -24,8 +24,13 @@ import java.util.UUID; import java.util.stream.Collectors; +import org.apache.hadoop.crypto.CipherSuite; +import org.apache.hadoop.crypto.CryptoProtocolVersion; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -57,6 +62,9 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Tests OMFileCreateRequest. 
@@ -190,7 +198,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() .setBucketName(bucketName) .setBucketLayout(getBucketLayout()) .setQuotaInNamespace(1)); - + OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); @@ -201,6 +209,44 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() OzoneManagerProtocolProtos.Status.QUOTA_EXCEEDED); } + @Test + public void testValidateAndUpdateEncryption() throws Exception { + KeyProviderCryptoExtension.EncryptedKeyVersion eKV = + KeyProviderCryptoExtension.EncryptedKeyVersion.createForDecryption( + "key1", "v1", new byte[0], new byte[0]); + KeyProviderCryptoExtension mockKeyProvider = mock(KeyProviderCryptoExtension.class); + when(mockKeyProvider.generateEncryptedKey(any())).thenReturn(eKV); + + when(ozoneManager.getKmsProvider()).thenReturn(mockKeyProvider); + keyName = "test/" + keyName; + OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, + HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, + false, true); + + // add volume and create bucket with bucket encryption key + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketLayout(getBucketLayout()) + .setBucketEncryptionKey( + new BucketEncryptionKeyInfo.Builder() + .setKeyName("key1") + .setSuite(mock(CipherSuite.class)) + .setVersion(mock(CryptoProtocolVersion.class)) + .build())); + + OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); + OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); + + OMFileCreateRequest omFileCreateRequestPreExecuted = getOMFileCreateRequest(modifiedOmRequest); + OMClientResponse omClientResponse = omFileCreateRequestPreExecuted + .validateAndUpdateCache(ozoneManager, 100L); + assertEquals( + OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); + assertTrue(omClientResponse.getOMResponse().getCreateFileResponse().getKeyInfo().hasFileEncryptionInfo()); + when(ozoneManager.getKmsProvider()).thenReturn(null); + } + @Test public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, @@ -243,19 +289,17 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); testNonRecursivePath("a/b", false, false, true); + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); // Create some child keys for the path OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/d", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/", 0L, 
HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/", 0L, replicationConfig, omMetadataManager); // cannot create file if directory of same name exists testNonRecursivePath("a/b/c", false, false, true); @@ -275,14 +319,14 @@ public void testValidateAndUpdateCacheWithRecursive() throws Exception { // Should be able to create file even if parent directories does not // exist and key already exist, as this is with overwrite enabled. testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/f", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/e/f", 0L, replicationConfig, omMetadataManager); testNonRecursivePath("c/d/e/f", true, true, false); // Create some child keys for the path OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/d", 0L, replicationConfig, omMetadataManager); testNonRecursivePath("a/b/c", false, true, false); } @@ -293,16 +337,17 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() String key = "c/d/e/f"; // Should be able to create file even if parent directories does not exist testNonRecursivePath(key, false, true, false); - + // 3 parent directory created c/d/e assertEquals(omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)) .getUsedNamespace(), 3); - + // Add the key to key table + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + key, 0L, replicationConfig, omMetadataManager); // Even if key exists, should be able to create file as overwrite is set // to true @@ -315,23 +360,21 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() throws Exception { String key = "c/d/e/f"; + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); // Need to add the path which starts with "c/d/e" to keyTable as this is // non-recursive parent should exist. 
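Two ways of expressing the same replication setting appear side by side in these tests: the factory on the concrete class and the generic proto-based factory. A short sketch (imports are already present in the file per the hunks above); treating the two as interchangeable for RATIS/ONE is my assumption, not a documented guarantee:

    // Both forms describe RATIS replication with factor ONE and can be handed
    // to the test helpers that now accept a ReplicationConfig.
    ReplicationConfig direct = RatisReplicationConfig.getInstance(ONE);
    ReplicationConfig fromProto = ReplicationConfig.fromProtoTypeAndFactor(
        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
    // Assumption: direct and fromProto are equivalent wherever a ReplicationConfig is expected.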
OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/e/", 0L, replicationConfig, omMetadataManager); testNonRecursivePath(key, false, false, false); // Add the key to key table OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + key, 0L, replicationConfig, omMetadataManager); // Even if key exists, should be able to create file as overwrite is set // to true @@ -449,10 +492,10 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, if (indx == dirs.size() - 1) { // verify file acls assertEquals(omDirInfo.getObjectID(), omKeyInfo.getParentObjectID()); - List fileAcls = omDirInfo.getAcls(); + List fileAcls = omKeyInfo.getAcls(); System.out.println(" file acls : " + omKeyInfo + " ==> " + fileAcls); assertEquals(expectedInheritAcls.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()), fileAcls, "Failed to inherit parent DEFAULT acls!"); } @@ -471,7 +514,7 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, // Should inherit parent DEFAULT acls // [user:newUser:rw[ACCESS], group:newGroup:rwl[ACCESS]] assertEquals(parentDefaultAcl.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()), keyAcls, "Failed to inherit bucket DEFAULT acls!"); // Should not inherit parent ACCESS acls diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index 1b7b7452c82c..e988949c5b85 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -28,11 +29,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -55,8 +56,7 @@ public void 
testValidateAndUpdateCacheWithNonRecursive() throws Exception { "a/b/c", omMetadataManager); String fileNameD = "d"; OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/" + fileNameD, 0L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); // cannot create file if directory of same name exists testNonRecursivePath("a/b/c", false, false, true); @@ -80,7 +80,7 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, - "/test/a1/a2", HddsProtos.ReplicationFactor.ONE, + "/test/a1/a2", ONE, HddsProtos.ReplicationType.RATIS, false, true); // create bucket with quota limit 1 @@ -114,11 +114,11 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() // Add the key to key table OmDirectoryInfo omDirInfo = getDirInfo("c/d/e"); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - omDirInfo.getObjectID() + 10, - omDirInfo.getObjectID(), 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(omDirInfo.getObjectID() + 10) + .setParentObjectID(omDirInfo.getObjectID()) + .setUpdateID(100) + .build(); OMRequestTestUtils.addFileToKeyTable(false, false, "f", omKeyInfo, -1, omDirInfo.getObjectID() + 10, omMetadataManager); @@ -136,23 +136,22 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() String fileName = "f"; String key = parentDir + "/" + fileName; OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager, getBucketLayout()); + omMetadataManager, getBucketLayout()); // Create parent dirs for the path long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, - bucketName, parentDir, omMetadataManager); + bucketName, parentDir, omMetadataManager); // Need to add the path which starts with "c/d/e" to OpenKeyTable as this is // non-recursive parent should exist. 
testNonRecursivePath(key, false, false, false); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); - OMRequestTestUtils.addFileToKeyTable(false, false, - fileName, omKeyInfo, -1, 50, omMetadataManager); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); // Even if key exists in KeyTable, should be able to create file as // overwrite is set to true diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java index 5757beeb282d..59d3e211efdb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.file; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -45,7 +46,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -423,7 +423,8 @@ protected OMRequest createAllocateBlockRequest(String volumeName, String bucketN KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setKeyName(keyName) - .setFactor(replicationFactor).setType(replicationType) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationConfig.getReplicationType()) .build(); AllocateBlockRequest allocateBlockRequest = @@ -562,8 +563,9 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { String addToOpenFileTable(List locationList, boolean hsyncFlag) throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor, 0, parentId, - 0, Time.now(), version); + bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setParentObjectID(parentId) + .build(); omKeyInfo.appendNewBlocks(locationList, false); if (hsyncFlag) { omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID, @@ -586,8 +588,9 @@ String addToOpenFileTable(List locationList, boolean hsyncFla String addToFileTable(List locationList, boolean hsyncFlag) throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor, 0, parentId, - 0, Time.now(), version); + bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), 
false)) + .setParentObjectID(parentId) + .build(); omKeyInfo.appendNewBlocks(locationList, false); if (hsyncFlag) { omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java index eb99cd932568..9fb0e79953e1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -236,7 +237,8 @@ protected OMRequest createAllocateBlockRequest() { KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setKeyName(keyName) - .setFactor(replicationFactor).setType(replicationType) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationConfig.getReplicationType()) .build(); AllocateBlockRequest allocateBlockRequest = @@ -253,8 +255,8 @@ protected OMRequest createAllocateBlockRequest() { protected String addKeyToOpenKeyTable(String volumeName, String bucketName) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, - omMetadataManager); + keyName, clientID, replicationConfig, + omMetadataManager); return ""; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java index 33512d355c0d..1ecbfed71624 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java @@ -20,10 +20,12 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -31,7 +33,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; /** @@ -65,10 +66,11 @@ protected String addKeyToOpenKeyTable(String volumeName, String bucketName) long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now()); + 
OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable OMRequestTestUtils.addFileToKeyTable(true, false, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index f040bd508177..cbb782e184fe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -68,7 +68,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, - key, clientID, replicationType, replicationFactor, trxnIndex++, + key, clientID, replicationConfig, trxnIndex++, omMetadataManager); String ozoneKey = omMetadataManager.getOzoneKey( volumeName, bucket, key); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java index c9559ff41e1f..b9aa70b4c7e8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java @@ -20,8 +20,12 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; + import java.util.List; import java.util.UUID; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -247,7 +251,7 @@ private OMRequest createSetAclKeyRequest(OzoneAcl acl) { protected String addKeyToTable() throws Exception { OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), 1L, omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java index 48d92e608b3e..ea9c3223de5a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import 
org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -26,7 +26,8 @@ import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO; import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; /** * Test Key ACL requests for prefix layout. @@ -44,20 +45,22 @@ protected String addKeyToTable() throws Exception { .addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); - OmKeyInfo omKeyInfo = OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - parentId + 1, parentId, 100, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); OMRequestTestUtils .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); final long volumeId = omMetadataManager.getVolumeId( - omKeyInfo.getVolumeName()); + omKeyInfo.getVolumeName()); final long bucketId = omMetadataManager.getBucketId( - omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); return omMetadataManager.getOzonePathKey( - volumeId, bucketId, omKeyInfo.getParentObjectID(), - fileName); + volumeId, bucketId, omKeyInfo.getParentObjectID(), + fileName); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index 0f77194c88c7..1c38287f55e8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -26,7 +26,11 @@ import java.util.UUID; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -52,10 +56,13 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.when; /** * Class tests OMKeyCommitRequest class. 
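// A minimal Java sketch of the OmKeyInfo construction pattern used in the updated tests above:
// OMRequestTestUtils.createOmKeyInfo(volume, bucket, key, replicationConfig) now returns a builder,
// so object id, parent id and update id are set explicitly instead of being passed positionally.
// The wrapper class and helper method names here are hypothetical; only the chained calls mirror the tests.
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;

import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;

final class KeyInfoBuilderSketch {
  private KeyInfoBuilderSketch() { }

  // Builds a file key under an FSO parent directory, as the reworked addKeyToTable helpers do.
  static OmKeyInfo newFileKeyInfo(String volume, String bucket, String key, long parentId, long txnId) {
    return OMRequestTestUtils.createOmKeyInfo(volume, bucket, key, RatisReplicationConfig.getInstance(ONE))
        .setObjectID(parentId + 1L)   // any id distinct from the parent works for a test fixture
        .setParentObjectID(parentId)  // links the file entry to its parent directory
        .setUpdateID(txnId)           // transaction id recorded on the key
        .build();
  }
}
// Usage (hypothetical): OmKeyInfo info = KeyInfoBuilderSketch.newFileKeyInfo(volumeName, bucketName, "a/b/c/f", parentId, 100L);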
@@ -551,16 +558,17 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { @Test public void testValidateAndUpdateCacheOnOverwrite() throws Exception { + when(ozoneManager.getObjectIdFromTxId(anyLong())).thenAnswer(tx -> + OmUtils.getObjectIdFromTxId(2, tx.getArgument(0))); testValidateAndUpdateCache(); // Become a new client and set next version number clientID = Time.now(); version += 1; - OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest()); + OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest(getKeyLocation(10).subList(4, 10), false)); - OMKeyCommitRequest omKeyCommitRequest = - getOmKeyCommitRequest(modifiedOmRequest); + OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); KeyArgs keyArgs = modifiedOmRequest.getCommitKeyRequest().getKeyArgs(); @@ -572,49 +580,54 @@ public void testValidateAndUpdateCacheOnOverwrite() throws Exception { assertNotNull(omKeyInfo); // Previously committed version - assertEquals(0L, - omKeyInfo.getLatestVersionLocations().getVersion()); + assertEquals(0L, omKeyInfo.getLatestVersionLocations().getVersion()); // Append new blocks List allocatedLocationList = - keyArgs.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); + keyArgs.getKeyLocationsList().stream() + .map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.toList()); addKeyToOpenKeyTable(allocatedLocationList); OMClientResponse omClientResponse = omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 102L); - assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); // New entry should be created in key Table. - omKeyInfo = - omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()) - .get(ozoneKey); + omKeyInfo = omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()).get(ozoneKey); assertNotNull(omKeyInfo); - assertEquals(version, - omKeyInfo.getLatestVersionLocations().getVersion()); + assertEquals(version, omKeyInfo.getLatestVersionLocations().getVersion()); // DB keyInfo format verifyKeyName(omKeyInfo); // Check modification time CommitKeyRequest commitKeyRequest = modifiedOmRequest.getCommitKeyRequest(); - assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), - omKeyInfo.getModificationTime()); + assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); // Check block location. 
List locationInfoListFromCommitKeyRequest = - commitKeyRequest.getKeyArgs() - .getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); + commitKeyRequest.getKeyArgs().getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.toList()); - assertEquals(locationInfoListFromCommitKeyRequest, - omKeyInfo.getLatestVersionLocations().getLocationList()); - assertEquals(allocatedLocationList, - omKeyInfo.getLatestVersionLocations().getLocationList()); + assertEquals(locationInfoListFromCommitKeyRequest, omKeyInfo.getLatestVersionLocations().getLocationList()); + assertEquals(allocatedLocationList, omKeyInfo.getLatestVersionLocations().getLocationList()); assertEquals(1, omKeyInfo.getKeyLocationVersions().size()); + + // flush response content to db + BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation(); + ((OMKeyCommitResponse) omClientResponse).addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + // verify deleted key is unique generated + String deletedKey = omMetadataManager.getOzoneKey(volumeName, omKeyInfo.getBucketName(), keyName); + List> rangeKVs + = omMetadataManager.getDeletedTable().getRangeKVs(null, 100, deletedKey); + assertThat(rangeKVs.size()).isGreaterThan(0); + assertEquals(1, rangeKVs.get(0).getValue().getOmKeyInfoList().size()); + assertFalse(rangeKVs.get(0).getKey().endsWith(rangeKVs.get(0).getValue().getOmKeyInfoList().get(0).getObjectID() + + "")); } /** @@ -682,7 +695,8 @@ private OMRequest createCommitKeyRequest( KeyArgs keyArgs = KeyArgs.newBuilder().setDataSize(dataSize).setVolumeName(volumeName) .setKeyName(keyName).setBucketName(bucketName) - .setType(replicationType).setFactor(replicationFactor) + .setType(replicationConfig.getReplicationType()) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) .addAllKeyLocations(keyLocations).build(); CommitKeyRequest commitKeyRequest = @@ -727,7 +741,7 @@ protected String getOzonePathKey() throws IOException { protected String addKeyToOpenKeyTable(List locationList) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager, + clientID, replicationConfig, omMetadataManager, locationList, version); return omMetadataManager.getOpenKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java index d258c1cfde43..48cc52773a33 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java @@ -19,19 +19,22 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; 
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; +import java.util.ArrayList; import java.util.List; /** @@ -78,10 +81,12 @@ protected String addKeyToOpenKeyTable(List locationList) long objectId = 100; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, - Time.now(), version); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(100L) + .build(); omKeyInfoFSO.appendNewBlocks(locationList, false); String fileName = OzoneFSUtils.getFileName(keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 12d9d02a72d6..166edb552ce6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -25,12 +25,14 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; import java.util.Map; +import java.util.Collections; import java.util.HashMap; +import java.util.UUID; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneAcl; @@ -39,15 +41,18 @@ import org.apache.hadoop.ozone.om.PrefixManagerImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; + +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockProvider; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -65,6 +70,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; +import static 
org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createOmKeyInfo; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.assertj.core.api.Assertions.assertThat; @@ -137,8 +143,12 @@ public void testValidateAndUpdateCache( when(ozoneManager.getOzoneLockProvider()).thenReturn( new OzoneLockProvider(setKeyPathLock, setFileSystemPaths)); + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + OMRequest modifiedOmRequest = - doPreExecute(createKeyRequest(false, 0)); + doPreExecute(createKeyRequest(false, 0, Collections.emptyMap(), tags)); OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(modifiedOmRequest); @@ -170,10 +180,10 @@ public void testValidateAndUpdateCache( .getCreateKeyResponse().getKeyInfo().getKeyLocationListCount()); // Disk should have 1 version, as it is fresh key create. - assertEquals(1, - omMetadataManager.getOpenKeyTable( - omKeyCreateRequest.getBucketLayout()) - .get(openKey).getKeyLocationVersions().size()); + OmKeyInfo openKeyInfo = omMetadataManager.getOpenKeyTable(omKeyCreateRequest.getBucketLayout()).get(openKey); + + assertEquals(1, openKeyInfo.getKeyLocationVersions().size()); + assertThat(openKeyInfo.getTags()).containsAllEntriesOf(tags); // Write to DB like key commit. omMetadataManager.getKeyTable(omKeyCreateRequest.getBucketLayout()) @@ -181,9 +191,13 @@ public void testValidateAndUpdateCache( .getOpenKeyTable(omKeyCreateRequest.getBucketLayout()) .get(openKey)); + tags.remove("tag-key1"); + tags.remove("tag-key2"); + tags.put("tag-key3", "tag-value3"); + // Override same key again modifiedOmRequest = - doPreExecute(createKeyRequest(false, 0)); + doPreExecute(createKeyRequest(false, 0, Collections.emptyMap(), tags)); id = modifiedOmRequest.getCreateKeyRequest().getClientID(); openKey = getOpenKey(id); @@ -213,6 +227,11 @@ public void testValidateAndUpdateCache( omMetadataManager.getOpenKeyTable( omKeyCreateRequest.getBucketLayout()) .get(openKey).getKeyLocationVersions().size()); + openKeyInfo = omMetadataManager.getOpenKeyTable(omKeyCreateRequest.getBucketLayout()).get(openKey); + + assertEquals(1, openKeyInfo.getKeyLocationVersions().size()); + assertThat(openKeyInfo.getTags()).containsAllEntriesOf(tags); + assertThat(openKeyInfo.getTags()).doesNotContainKeys("tag-key1", "tag-key2"); } @@ -421,7 +440,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound( @MethodSource("data") public void testValidateAndUpdateCacheWithInvalidPath( boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { - PrefixManager prefixManager = new PrefixManagerImpl( + PrefixManager prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); when(ozoneManager.getOzoneLockProvider()).thenReturn( @@ -463,6 +482,107 @@ public void testValidateAndUpdateCacheWithInvalidPath( assertNull(omKeyInfo); } + + @ParameterizedTest + @MethodSource("data") + public void testOverwritingExistingMetadata( + boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { + when(ozoneManager.getOzoneLockProvider()).thenReturn( + new OzoneLockProvider(setKeyPathLock, setFileSystemPaths)); + + addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, + getBucketLayout()); + + Map initialMetadata = + Collections.singletonMap("initialKey", "initialValue"); + 
OMRequest initialRequest = + createKeyRequest(false, 0, keyName, initialMetadata); + OMKeyCreateRequest initialOmKeyCreateRequest = + new OMKeyCreateRequest(initialRequest, getBucketLayout()); + OMClientResponse initialResponse = + initialOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); + verifyMetadataInResponse(initialResponse, initialMetadata); + + // We have to add the key to the key table, as validateAndUpdateCache only + // updates the cache and not the DB. + OmKeyInfo keyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationConfig).build(); + keyInfo.setMetadata(initialMetadata); + omMetadataManager.getKeyTable(initialOmKeyCreateRequest.getBucketLayout()) + .put(getOzoneKey(), keyInfo); + + Map updatedMetadata = + Collections.singletonMap("initialKey", "updatedValue"); + OMRequest updatedRequest = + createKeyRequest(false, 0, keyName, updatedMetadata); + OMKeyCreateRequest updatedOmKeyCreateRequest = + new OMKeyCreateRequest(updatedRequest, getBucketLayout()); + + OMClientResponse updatedResponse = + updatedOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); + verifyMetadataInResponse(updatedResponse, updatedMetadata); + } + + @ParameterizedTest + @MethodSource("data") + public void testCreationWithoutMetadataFollowedByOverwriteWithMetadata( + boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { + when(ozoneManager.getOzoneLockProvider()).thenReturn( + new OzoneLockProvider(setKeyPathLock, setFileSystemPaths)); + addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, + getBucketLayout()); + + // Create the key request without any initial metadata + OMRequest createRequestWithoutMetadata = createKeyRequest(false, 0, keyName, + null); // Passing 'null' for metadata + OMKeyCreateRequest createOmKeyCreateRequest = + new OMKeyCreateRequest(createRequestWithoutMetadata, getBucketLayout()); + + // Perform the create operation without any metadata + OMClientResponse createResponse = + createOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); + // Verify that no metadata exists in the response + assertThat( + createResponse.getOMResponse().getCreateKeyResponse().getKeyInfo() + .getMetadataList()).isEmpty(); + + OmKeyInfo keyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationConfig).build(); + omMetadataManager.getKeyTable(createOmKeyCreateRequest.getBucketLayout()) + .put(getOzoneKey(), keyInfo); + + // Define new metadata for the overwrite operation + Map overwriteMetadata = new HashMap<>(); + overwriteMetadata.put("newKey", "newValue"); + + // Overwrite the previously created key with new metadata + OMRequest overwriteRequestWithMetadata = + createKeyRequest(false, 0, keyName, overwriteMetadata); + OMKeyCreateRequest overwriteOmKeyCreateRequest = + new OMKeyCreateRequest(overwriteRequestWithMetadata, getBucketLayout()); + + // Perform the overwrite operation and capture the response + OMClientResponse overwriteResponse = + overwriteOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); + // Verify the new metadata is correctly applied in the response + verifyMetadataInResponse(overwriteResponse, overwriteMetadata); + } + + + private void verifyMetadataInResponse(OMClientResponse response, + Map expectedMetadata) { + // Extract metadata from the response + List metadataList = + response.getOMResponse().getCreateKeyResponse().getKeyInfo() + .getMetadataList(); + assertEquals(expectedMetadata.size(), metadataList.size()); + metadataList.forEach(kv -> { + String expectedValue = 
expectedMetadata.get(kv.getKey()); + assertEquals(expectedValue, kv.getValue(), + "Metadata value mismatch for key: " + kv.getKey()); + }); + } + /** * This method calls preExecute and verify the modified request * @param originalOMRequest @return OMRequest */ @SuppressWarnings("parameterNumber") protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) { - return createKeyRequest(isMultipartKey, partNumber, keyName); + return createKeyRequest(isMultipartKey, partNumber, Collections.emptyMap(), Collections.emptyMap()); + } + + protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, + Map metadata, Map tags) { + return createKeyRequest(isMultipartKey, partNumber, keyName, metadata, tags); + } private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, String keyName) { + return createKeyRequest(isMultipartKey, partNumber, keyName, null); + } + + protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, + String keyName, + Map metadata) { + return createKeyRequest(isMultipartKey, partNumber, keyName, metadata, null); + } + /** + * Create OMRequest which encapsulates a CreateKeyRequest, optionally + * with metadata. + * + * @param isMultipartKey Indicates if the key is part of a multipart upload. + * @param partNumber The part number for multipart uploads, ignored if + * isMultipartKey is false. + * @param keyName The name of the key to create or update. + * @param metadata Optional metadata for the key. Pass null or an empty + * map if no metadata is to be set. + * @param tags Optional tags for the key. Pass null or an empty + * map if no tags are to be set. + * @return OMRequest configured with the provided parameters.
+ */ + protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, + String keyName, + Map metadata, + Map tags) { KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setIsMultipartKey(isMultipartKey) - .setFactor(replicationFactor).setType(replicationType) + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setIsMultipartKey(isMultipartKey) + .setFactor( + ((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationConfig.getReplicationType()) .setLatestVersionLocation(true); + // Configure for multipart upload, if applicable if (isMultipartKey) { keyArgs.setDataSize(dataSize).setMultipartNumber(partNumber); } + // Include metadata, if provided + if (metadata != null && !metadata.isEmpty()) { + metadata.forEach((key, value) -> keyArgs.addMetadata(KeyValue.newBuilder() + .setKey(key) + .setValue(value) + .build())); + } + + if (tags != null && !tags.isEmpty()) { + keyArgs.addAllTags(KeyValueUtil.toProtobuf(tags)); + } + OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest = CreateKeyRequest.newBuilder().setKeyArgs(keyArgs).build(); return OMRequest.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) .setClientId(UUID.randomUUID().toString()) - .setCreateKeyRequest(createKeyRequest).build(); + .setCreateKeyRequest(createKeyRequest) + .build(); } private OMRequest createKeyRequest( @@ -783,7 +952,7 @@ private void verifyKeyInheritAcls(List keyAcls, // Should inherit parent DEFAULT Acls assertEquals(parentDefaultAcl.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()), keyAcls, "Failed to inherit parent DEFAULT acls!,"); @@ -793,7 +962,7 @@ private void verifyKeyInheritAcls(List keyAcls, protected void addToKeyTable(String keyName) throws Exception { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(1), 0L, RATIS, THREE, omMetadataManager); + keyName.substring(1), 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index 0750c9512618..2a25a9b09686 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -42,6 +41,7 @@ import java.util.Arrays; import java.util.Collection; +import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -107,12 +107,13 @@ protected void addToKeyTable(String keyName) throws Exception { Path keyPath = Paths.get(keyName); long parentId = checkIntermediatePaths(keyPath); String fileName = OzoneFSUtils.getFileName(keyName); - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fileName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, - Time.now()); - OMRequestTestUtils.addFileToKeyTable(false, false, - fileName, omKeyInfo, -1, 50, omMetadataManager); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fileName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index 00d1883d749c..9f1bee28c047 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -192,8 +192,8 @@ protected String addKeyToTable() throws Exception { protected String addKeyToTable(String key) throws Exception { OMRequestTestUtils.addKeyToTable(false, volumeName, - bucketName, key, clientID, replicationType, replicationFactor, - omMetadataManager); + bucketName, key, clientID, replicationConfig, + omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, key); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java index 9dafab090295..07094ad2923f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java @@ -18,13 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.OzonePrefixPathImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -33,7 +34,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import 
org.apache.hadoop.ozone.security.acl.OzonePrefixPath; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -72,11 +72,11 @@ protected String addKeyToTable() throws Exception { bucketName, PARENT_DIR, omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); omKeyInfo.setKeyName(FILE_NAME); OMRequestTestUtils.addFileToKeyTable(false, false, FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); @@ -96,11 +96,11 @@ protected String addKeyToDirTable(String volumeName, String bucketName, bucketName, key, omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); omKeyInfo.setKeyName(key); return omKeyInfo.getPath(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index a1d616c07563..a912f549b3ce 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.UUID; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -31,7 +30,6 @@ import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; @@ -44,7 +42,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -76,7 +73,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, - key, clientID, replicationType, replicationFactor, trxnIndex++, + key, clientID, replicationConfig, trxnIndex++, omMetadataManager); ozoneKeyNames.add(omMetadataManager.getOzoneKey( volumeName, bucket, 
key)); @@ -219,13 +216,12 @@ public void testKeyPurgeInSnapshot() throws Exception { .setName("snap1") .build(); - ReferenceCounted rcOmSnapshot = - ozoneManager.getOmSnapshotManager().checkForSnapshot( + ReferenceCounted rcOmSnapshot = + ozoneManager.getOmSnapshotManager().getSnapshot( fromSnapshotInfo.getVolumeName(), fromSnapshotInfo.getBucketName(), - getSnapshotPrefix(fromSnapshotInfo.getName()), - true); - OmSnapshot omSnapshot = (OmSnapshot) rcOmSnapshot.get(); + fromSnapshotInfo.getName()); + OmSnapshot omSnapshot = rcOmSnapshot.get(); // The keys should be present in the snapshot's deletedTable for (String deletedKey : deletedKeyNames) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java index a6015870d09b..0a2dcfd5d67a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java @@ -240,7 +240,7 @@ protected OMRequest createRenameKeyRequest( protected OmKeyInfo getOmKeyInfo(String keyName) { return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, 0L); + replicationConfig).build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java index c91b8e158214..40c5156b5dbe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java @@ -18,12 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; + import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -37,7 +39,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -179,10 +180,10 @@ private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception { @Override protected OmKeyInfo getOmKeyInfo(String keyName) { long bucketId = random.nextLong(); - return OMRequestTestUtils.createOmKeyInfo( - volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - bucketId + 100L, bucketId + 101L, 0L, Time.now()); + return 
OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(bucketId + 100L) + .setParentObjectID(bucketId + 101L) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 4dfb3c67c963..e5c3f19b8506 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -25,6 +25,7 @@ import java.util.UUID; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -44,7 +45,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; @@ -58,7 +58,6 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -115,8 +114,7 @@ public class TestOMKeyRequest { protected String volumeName; protected String bucketName; protected String keyName; - protected HddsProtos.ReplicationType replicationType; - protected HddsProtos.ReplicationFactor replicationFactor; + protected ReplicationConfig replicationConfig; protected long clientID; protected long scmBlockSize = 1000L; protected long dataSize; @@ -177,7 +175,7 @@ public void setup() throws Exception { when(ozoneManager.getAccessAuthorizer()) .thenReturn(new OzoneNativeAuthorizer()); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); // Init OmMetadataReader to let the test pass @@ -220,8 +218,7 @@ public void setup() throws Exception { volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); keyName = UUID.randomUUID().toString(); - replicationFactor = HddsProtos.ReplicationFactor.ONE; - replicationType = HddsProtos.ReplicationType.RATIS; + replicationConfig = RatisReplicationConfig.getInstance(ReplicationFactor.ONE); clientID = Time.now(); dataSize = 1000L; random = new Random(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java index d48131de4bd3..d0cfd48e35dc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -31,6 +31,7 @@ import java.util.List; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -145,8 +146,7 @@ protected void createPreRequisites() throws Exception { for (int i = 0; i < count; i++) { key = parentDir.concat("/key" + i); OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, - parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, omMetadataManager); + parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager); deleteKeyArgs.addKeys(key); deleteKeyList.add(key); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java index f28ca2e2685f..2da80550275a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; /** @@ -83,11 +83,13 @@ protected void createPreRequisites() throws Exception { long parentId = OMRequestTestUtils .addParentsToDirTable(volumeName, bucketName, dir, omMetadataManager); - OmKeyInfo omKeyInfo = OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, dir + "/" + file, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, - Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, dir + "/" + file, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); omKeyInfo.setKeyName(file); OMRequestTestUtils .addFileToKeyTable(false, false, file, omKeyInfo, -1, 50, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java index 3d429f4d6847..340b6e36eb0b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java @@ -18,12 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -127,8 +129,7 @@ private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, - parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, omMetadataManager); + parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager); RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() .setFromKeyName(key) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index 9c5a9257245f..8671ff107131 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -50,7 +50,7 @@ public class TestOMPrefixAclRequest extends TestOMKeyRequest { @Test public void testAddAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; @@ -116,7 +116,7 @@ public void testAddAclRequest() throws Exception { @Test public void testValidationFailure() { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); @@ -128,7 +128,7 @@ public void testValidationFailure() { ); OMClientResponse response1 = invalidRequest1.validateAndUpdateCache(ozoneManager, 1); - assertEquals(OzoneManagerProtocolProtos.Status.PREFIX_NOT_FOUND, + assertEquals(OzoneManagerProtocolProtos.Status.INVALID_PATH_IN_ACL_REQUEST, response1.getOMResponse().getStatus()); // Not a valid FS path @@ -143,7 +143,7 @@ public void testValidationFailure() { @Test public void testRemoveAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new 
PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; @@ -223,7 +223,7 @@ public void testRemoveAclRequest() throws Exception { @Test public void testSetAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java index bfae424cc954..ad834fa556bf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java @@ -100,7 +100,7 @@ private OMRequest createSetTimesKeyRequest(long mtime, long atime) { protected String addKeyToTable() throws Exception { OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, + keyName, clientID, replicationConfig, 1L, omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java index 2cd9273c25a5..0960125b0575 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.ozone.om.request.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.junit.jupiter.api.Test; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -115,10 +115,13 @@ protected String addKeyToTable() throws Exception { .addParentsToDirTable(volumeName, bucketName, PARENT_DIR, omMetadataManager); - OmKeyInfo omKeyInfo = OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, FILE_NAME, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - parentId + 1, parentId, 100, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_NAME, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); 
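Note (illustration, not part of the patch): OMRequestTestUtils.createOmKeyInfo now takes a ReplicationConfig and, judging by the chained setters in these hunks, returns a builder rather than a finished OmKeyInfo, so the object ID, parent ID and update ID that used to be positional arguments are set explicitly. A sketch of the new call shape, reusing the names from the hunk above (volumeName, bucketName, FILE_NAME, parentId come from the surrounding test):

// Sketch only; mirrors the updated usage in TestOMSetTimesRequestWithFSO above.
OmKeyInfo omKeyInfo =
    OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_NAME,
            RatisReplicationConfig.getInstance(ONE))  // replication passed as one config object
        .setObjectID(parentId + 1L)                   // previously a positional argument
        .setParentObjectID(parentId)                  // previously a positional argument
        .setUpdateID(100L)                            // previously a positional argument
        .build();                                     // creation time is no longer passed explicitly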
OMRequestTestUtils .addFileToKeyTable(false, false, FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java index 25c908b18a2d..f02e1ee23679 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -27,14 +28,15 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -51,7 +53,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -480,10 +481,13 @@ private List createMPUsWithFSO(String volume, String bucket, commitMultipartRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); // Add key to open key table to be used in MPU commit processing - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentID + j, parentID, - trxnLogIndex, Time.now(), true); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + j) + .setParentObjectID(parentID) + .setUpdateID(trxnLogIndex) + .build(); + String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -563,8 +567,7 @@ private List createMPUs(String volume, String bucket, // Add key to open key table to be used in MPU commit processing OMRequestTestUtils.addKeyToTable( true, true, - volume, bucket, keyName, clientID, HddsProtos.ReplicationType.RATIS, - 
HddsProtos.ReplicationFactor.ONE, omMetadataManager); + volume, bucket, keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMClientResponse commitResponse = s3MultipartUploadCommitPartRequest.validateAndUpdateCache( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index 40b0c23e5a9e..30b76801d9e8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -25,7 +25,9 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.stream.Collectors; @@ -62,8 +64,16 @@ public void testValidateAndUpdateCache() throws Exception { OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + Map customMetadata = new HashMap<>(); + customMetadata.put("custom-key1", "custom-value1"); + customMetadata.put("custom-key2", "custom-value2"); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, - bucketName, keyName); + bucketName, keyName, customMetadata, tags); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = getS3InitiateMultipartUploadReq(modifiedRequest); @@ -84,6 +94,13 @@ public void testValidateAndUpdateCache() throws Exception { assertNotNull(openMPUKeyInfo); assertNotNull(openMPUKeyInfo.getLatestVersionLocations()); assertTrue(openMPUKeyInfo.getLatestVersionLocations().isMultipartKey()); + assertNotNull(openMPUKeyInfo.getMetadata()); + assertEquals("custom-value1", openMPUKeyInfo.getMetadata().get("custom-key1")); + assertEquals("custom-value2", openMPUKeyInfo.getMetadata().get("custom-key2")); + assertNotNull(openMPUKeyInfo.getTags()); + assertEquals("tag-value1", openMPUKeyInfo.getTags().get("tag-key1")); + assertEquals("tag-value2", openMPUKeyInfo.getTags().get("tag-key2")); + assertNotNull(omMetadataManager.getMultipartInfoTable().get(multipartKey)); assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest() @@ -224,21 +241,20 @@ private void verifyKeyInheritAcls(List keyAcls, List parentDefaultAcl = bucketAcls.stream() .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()); - OzoneAcl parentAccessAcl = bucketAcls.stream() + List parentAccessAcl = bucketAcls.stream() .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.ACCESS) - .findAny().orElse(null); + .collect(Collectors.toList()); // Should inherit parent DEFAULT Acls // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] - assertEquals(parentDefaultAcl.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()), keyAcls, + assertEquals(parentDefaultAcl, keyAcls, "Failed to inherit parent DEFAULT acls!"); // Should not inherit parent ACCESS Acls - assertThat(keyAcls).doesNotContain(parentAccessAcl); + 
assertThat(keyAcls).doesNotContainAnyElementsOf(parentAccessAcl); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java index ab05f927e1d6..1d4eb5310e05 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java @@ -36,7 +36,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.stream.Collectors; @@ -62,11 +64,19 @@ public void testValidateAndUpdateCache() throws Exception { OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + Map customMetadata = new HashMap<>(); + customMetadata.put("custom-key1", "custom-value1"); + customMetadata.put("custom-key2", "custom-value2"); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); OMRequest modifiedRequest = doPreExecuteInitiateMPUWithFSO(volumeName, - bucketName, keyName); + bucketName, keyName, customMetadata, tags); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadReqFSO = getS3InitiateMultipartUploadReq(modifiedRequest); @@ -98,10 +108,15 @@ public void testValidateAndUpdateCache() throws Exception { assertTrue( omKeyInfo.getLatestVersionLocations().isMultipartKey(), "isMultipartKey is false!"); - assertEquals(fileName, omKeyInfo.getKeyName(), - "FileName mismatches!"); + assertEquals(fileName, omKeyInfo.getFileName(), "FileName mismatches!"); assertEquals(parentID, omKeyInfo.getParentObjectID(), "ParentId mismatches!"); + assertNotNull(omKeyInfo.getMetadata()); + assertEquals("custom-value1", omKeyInfo.getMetadata().get("custom-key1")); + assertEquals("custom-value2", omKeyInfo.getMetadata().get("custom-key2")); + assertNotNull(omKeyInfo.getTags()); + assertEquals("tag-value1", omKeyInfo.getTags().get("tag-key1")); + assertEquals("tag-value2", omKeyInfo.getTags().get("tag-key2")); OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager .getMultipartInfoTable().get(multipartFileKey); @@ -256,7 +271,7 @@ private void verifyKeyInheritAcls(List dirs, OmKeyInfo fileInfo, List fileAcls = fileInfo.getAcls(); System.out.println(" file acls : " + fileInfo + " ==> " + fileAcls); assertEquals(expectedInheritAcls.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()), fileAcls, "Failed to inherit parent DEFAULT acls!"); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index c01bb459b8f4..bd93fe176e93 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ 
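Note (illustration, not part of the patch): OzoneAcl.setAclScope(...) is replaced by withScope(...) throughout, which reads as a copy-returning accessor, so the expected inherited-ACL list can be built up front and compared directly. A hedged sketch of the comparison pattern used in verifyKeyInheritAcls above:

// Expected: parent DEFAULT ACLs, re-scoped to ACCESS (withScope presumably returns a modified copy).
List<OzoneAcl> parentDefaultAcl = bucketAcls.stream()
    .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT)
    .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS))
    .collect(Collectors.toList());

// Parent ACCESS ACLs must not leak onto the key.
List<OzoneAcl> parentAccessAcl = bucketAcls.stream()
    .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.ACCESS)
    .collect(Collectors.toList());

assertEquals(parentDefaultAcl, keyAcls, "Failed to inherit parent DEFAULT acls!");
assertThat(keyAcls).doesNotContainAnyElementsOf(parentAccessAcl);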
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -21,10 +21,13 @@ import java.io.IOException; import java.nio.file.Path; +import java.util.Collections; import java.util.List; +import java.util.Map; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; @@ -39,7 +42,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataReader; import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -86,7 +88,7 @@ public void setup() throws Exception { when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); auditLogger = mock(AuditLogger.class); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); // Init OmMetadataReader to let the test pass @@ -131,9 +133,41 @@ public void stop() { */ protected OMRequest doPreExecuteInitiateMPU( String volumeName, String bucketName, String keyName) throws Exception { + return doPreExecuteInitiateMPU(volumeName, bucketName, keyName, Collections.emptyMap()); + } + + /** + * Perform preExecute of Initiate Multipart upload request for given + * volume, bucket and key name. + * @param volumeName + * @param bucketName + * @param keyName + * @param metadata + * @return OMRequest - returned from preExecute. + */ + protected OMRequest doPreExecuteInitiateMPU( + String volumeName, String bucketName, String keyName, + Map metadata) throws Exception { + return doPreExecuteInitiateMPU(volumeName, bucketName, keyName, metadata, + Collections.emptyMap()); + } + + /** + * Perform preExecute of Initiate Multipart upload request for given + * volume, bucket and key name. + * @param volumeName + * @param bucketName + * @param keyName + * @param metadata + * @param tags + * @return OMRequest - returned from preExecute. 
+ */ + protected OMRequest doPreExecuteInitiateMPU( + String volumeName, String bucketName, String keyName, + Map metadata, Map tags) throws Exception { OMRequest omRequest = OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName, - keyName); + keyName, metadata, tags); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = getS3InitiateMultipartUploadReq(omRequest); @@ -148,6 +182,22 @@ protected OMRequest doPreExecuteInitiateMPU( assertThat(modifiedRequest.getInitiateMultiPartUploadRequest() .getKeyArgs().getModificationTime()).isGreaterThan(0); + if (metadata != null) { + Map modifiedKeyMetadata = KeyValueUtil.getFromProtobuf( + modifiedRequest.getInitiateMultiPartUploadRequest() + .getKeyArgs().getMetadataList()); + + assertThat(modifiedKeyMetadata).containsAllEntriesOf(metadata); + } + + if (tags != null) { + Map modifiedKeyTags = KeyValueUtil.getFromProtobuf( + modifiedRequest.getInitiateMultiPartUploadRequest() + .getKeyArgs().getTagsList()); + + assertThat(modifiedKeyTags).containsAllEntriesOf(tags); + } + return modifiedRequest; } @@ -248,9 +298,26 @@ protected OMRequest doPreExecuteCompleteMPU( */ protected OMRequest doPreExecuteInitiateMPUWithFSO( String volumeName, String bucketName, String keyName) throws Exception { + return doPreExecuteInitiateMPUWithFSO(volumeName, bucketName, keyName, + Collections.emptyMap(), Collections.emptyMap()); + } + + /** + * Perform preExecute of Initiate Multipart upload request for given + * volume, bucket and key name. + * @param volumeName + * @param bucketName + * @param keyName + * @param metadata + * @param tags + * @return OMRequest - returned from preExecute. + */ + protected OMRequest doPreExecuteInitiateMPUWithFSO( + String volumeName, String bucketName, String keyName, + Map metadata, Map tags) throws Exception { OMRequest omRequest = OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName, - keyName); + keyName, metadata, tags); S3InitiateMultipartUploadRequestWithFSO s3InitiateMultipartUploadRequestWithFSO = @@ -266,6 +333,21 @@ protected OMRequest doPreExecuteInitiateMPUWithFSO( .getKeyArgs().getMultipartUploadID()); assertThat(modifiedRequest.getInitiateMultiPartUploadRequest() .getKeyArgs().getModificationTime()).isGreaterThan(0); + if (metadata != null) { + Map modifiedKeyMetadata = KeyValueUtil.getFromProtobuf( + modifiedRequest.getInitiateMultiPartUploadRequest() + .getKeyArgs().getMetadataList()); + + assertThat(modifiedKeyMetadata).containsAllEntriesOf(metadata); + } + + if (tags != null) { + Map modifiedKeyTags = KeyValueUtil.getFromProtobuf( + modifiedRequest.getInitiateMultiPartUploadRequest() + .getKeyArgs().getTagsList()); + + assertThat(modifiedKeyTags).containsAllEntriesOf(tags); + } return modifiedRequest; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index 61c792a83de3..014b4e021cb3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -24,6 +24,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static 
org.junit.jupiter.api.Assertions.assertTrue; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -224,9 +226,8 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { - OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), omMetadataManager); } protected String getKeyName() { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java index 4c8e4881d925..24480c249cc8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java @@ -24,15 +24,17 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import java.io.IOException; +import java.util.ArrayList; import java.util.UUID; /** @@ -68,13 +70,16 @@ protected String getKeyName() { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { long txnLogId = 0L; - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID, - txnLogId, Time.now(), true); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 1) + .setParentObjectID(parentID) + .setUpdateID(txnLogId) + .build(); String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, - fileName, omKeyInfo, clientID, txnLogId, omMetadataManager); + fileName, omKeyInfo, clientID, txnLogId, omMetadataManager); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java 
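Note (illustration, not part of the patch): several hunks above add optional custom metadata and tag maps to the initiate-MPU test path. A condensed sketch of the round trip they exercise, assuming the overloads and helpers exactly as shown in the hunks (doPreExecuteInitiateMPU, KeyValueUtil.getFromProtobuf); generic parameters are written out here for readability:

Map<String, String> customMetadata = new HashMap<>();
customMetadata.put("custom-key1", "custom-value1");
Map<String, String> tags = new HashMap<>();
tags.put("tag-key1", "tag-value1");

// preExecute should carry both maps through to the modified request...
OMRequest modifiedRequest =
    doPreExecuteInitiateMPU(volumeName, bucketName, keyName, customMetadata, tags);

// ...where they can be read back from the protobuf KeyArgs and compared.
Map<String, String> modifiedKeyMetadata = KeyValueUtil.getFromProtobuf(
    modifiedRequest.getInitiateMultiPartUploadRequest().getKeyArgs().getMetadataList());
Map<String, String> modifiedKeyTags = KeyValueUtil.getFromProtobuf(
    modifiedRequest.getInitiateMultiPartUploadRequest().getKeyArgs().getTagsList());
assertThat(modifiedKeyMetadata).containsAllEntriesOf(customMetadata);
assertThat(modifiedKeyTags).containsAllEntriesOf(tags);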
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 733c790bcf17..db77d29ee7c8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -25,19 +26,22 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.junit.jupiter.api.Test; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -70,13 +74,30 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + Map customMetadata = new HashMap<>(); + customMetadata.put("custom-key1", "custom-value1"); + customMetadata.put("custom-key2", "custom-value2"); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + + String uploadId = checkValidateAndUpdateCacheSuccess( - volumeName, bucketName, keyName); + volumeName, bucketName, keyName, customMetadata, tags); checkDeleteTableCount(volumeName, bucketName, keyName, 0, uploadId); + customMetadata.remove("custom-key1"); + customMetadata.remove("custom-key2"); + customMetadata.put("custom-key3", "custom-value3"); + + tags.remove("tag-key1"); + tags.remove("tag-key2"); + tags.put("tag-key3", "tag-value3"); + // Do it twice to test overwrite uploadId = checkValidateAndUpdateCacheSuccess(volumeName, bucketName, - keyName); + keyName, customMetadata, tags); // After overwrite, one entry must be in delete table checkDeleteTableCount(volumeName, bucketName, keyName, 1, uploadId); } @@ -104,10 +125,10 @@ public void checkDeleteTableCount(String volumeName, } private String checkValidateAndUpdateCacheSuccess(String volumeName, - String bucketName, String keyName) throws Exception { + String bucketName, String keyName, Map metadata, Map tags) throws Exception { OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName, - bucketName, keyName); + bucketName, keyName, metadata, tags); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = getS3InitiateMultipartUploadReq(initiateMPURequest); @@ -132,9 +153,14 @@ private 
String checkValidateAndUpdateCacheSuccess(String volumeName, List partList = new ArrayList<>(); - String partName = getPartName(volumeName, bucketName, keyName, - multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1) + String eTag = s3MultipartUploadCommitPartRequest.getOmRequest() + .getCommitMultiPartUploadRequest() + .getKeyArgs() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().get().getValue(); + partList.add(Part.newBuilder().setETag(eTag).setPartName(eTag).setPartNumber(1) .build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, @@ -168,6 +194,12 @@ private String checkValidateAndUpdateCacheSuccess(String volumeName, assertNotNull(multipartKeyInfo.getLatestVersionLocations()); assertTrue(multipartKeyInfo.getLatestVersionLocations() .isMultipartKey()); + if (metadata != null) { + assertThat(multipartKeyInfo.getMetadata()).containsAllEntriesOf(metadata); + } + if (tags != null) { + assertThat(multipartKeyInfo.getTags()).containsAllEntriesOf(tags); + } OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable() .getCacheValue(new CacheKey<>( @@ -222,10 +254,10 @@ public void testInvalidPartOrderError() throws Exception { String partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 23); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(23).build()); partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(1).build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, bucketName, keyName, multipartUploadID, partList); @@ -315,8 +347,7 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError() protected void addKeyToTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager); } protected String getMultipartKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index 5926b5fd1d9c..1762f38b44bd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -18,18 +18,21 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import 
org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import java.io.IOException; +import java.util.ArrayList; import java.util.UUID; /** @@ -72,10 +75,12 @@ protected void addKeyToTable(String volumeName, String bucketName, long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable String fileName = OzoneFSUtils.getFileName(keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index 45e5b1007531..a3e83986b531 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -52,6 +52,7 @@ import java.io.IOException; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getFromProtobuf; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createSnapshotRequest; @@ -321,8 +322,9 @@ private void renameDir(String fromKey, String toKey, long offset) throws Exception { String fromKeyParentName = UUID.randomUUID().toString(); OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, 100L); + bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) + .setObjectID(100L) + .build(); OmKeyInfo toKeyInfo = addKey(toKey, offset + 4L); OmKeyInfo fromKeyInfo = addKey(fromKey, offset + 5L); @@ -381,8 +383,8 @@ public static OMSnapshotCreateRequest doPreExecute( private OmKeyInfo addKey(String keyName, long objectId) { return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, - objectId); + RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) + .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index ca737d2bd254..03dc7862e35a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -110,8 +109,6 @@ public void setup() throws Exception { doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.getSnapshotCache()) - .thenReturn(mock(SnapshotCache.class)); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); volumeName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index a3b0dae46315..8edd096e766c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.om.IOmMetadataReader; @@ -37,7 +38,6 @@ import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; @@ -64,10 +64,12 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; @@ 
-78,8 +80,6 @@ * Tests OMSnapshotPurgeRequest class. */ public class TestOMSnapshotPurgeRequestAndResponse { - - private BatchOperation batchOperation; private List checkpointPaths = new ArrayList<>(); private OzoneManager ozoneManager; @@ -115,7 +115,7 @@ void setup(@TempDir File testDir) throws Exception { when(ozoneManager.isAdmin(any())).thenReturn(true); when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); omSnapshotManager = new OmSnapshotManager(ozoneManager); @@ -178,7 +178,6 @@ private void createSnapshotCheckpoint(String snapshotName) throws Exception { private void createSnapshotCheckpoint(String volume, String bucket, String snapshotName) throws Exception { - batchOperation = omMetadataManager.getStore().initBatchOperation(); OMRequest omRequest = OMRequestTestUtils .createSnapshotRequest(volume, bucket, snapshotName); // Pre-Execute OMSnapshotCreateRequest. @@ -189,9 +188,10 @@ private void createSnapshotCheckpoint(String volume, OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); // Add to batch and commit to DB. - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - batchOperation.close(); + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); SnapshotInfo snapshotInfo = @@ -227,19 +227,35 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest) omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); // Commit to DB. - batchOperation = omMetadataManager.getStore().initBatchOperation(); - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } } @Test public void testValidateAndUpdateCache() throws Exception { + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( snapshotDbKeysToPurge); - purgeSnapshots(snapshotPurgeRequest); + + OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); + + OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + + for (String snapshotTableKey: snapshotDbKeysToPurge) { + assertNull(omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey)); + } + + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } // Check if the entries are deleted. 
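Note (illustration, not part of the patch): the snapshot tests stop caching a BatchOperation in a field and instead scope each batch with try-with-resources, so the operation is closed even if the commit throws. The pattern, as used in the hunk above:

// BatchOperation is auto-closeable in this usage; try-with-resources replaces the old
// initBatchOperation()/close() bookkeeping that was spread across the test.
try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) {
  omClientResponse.addToDBBatch(omMetadataManager, batchOperation);
  omMetadataManager.getStore().commitBatchOperation(batchOperation);
}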
assertTrue(omMetadataManager.getSnapshotInfoTable().isEmpty()); @@ -248,6 +264,36 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } + assertEquals(initialSnapshotPurgeCount + 1, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount, omMetrics.getNumSnapshotPurgeFails()); + } + + /** + * This test is mainly to validate metrics and error code. + */ + @Test + public void testValidateAndUpdateCacheFailure() throws Exception { + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + + List snapshotDbKeysToPurge = createSnapshots(10); + + OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); + Table mockedSnapshotInfoTable = mock(Table.class); + + when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); + when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + + OMRequest snapshotPurgeRequest = createPurgeKeysRequest(snapshotDbKeysToPurge); + OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); + + OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + + assertEquals(INTERNAL_ERROR, omSnapshotPurgeResponse.getOMResponse().getStatus()); + assertEquals(initialSnapshotPurgeCount, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails()); } // TODO: clean up: Do we this test after diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java new file mode 100644 index 000000000000..14af3e28b8b8 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java @@ -0,0 +1,359 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
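Note (illustration, not part of the patch): the new failure-path test above uses plain Mockito fault injection rather than a special test hook to drive the error code and failure metric. A condensed sketch of the idea, reusing the names from the hunk (generic parameters written out for readability):

// Replace the metadata manager with a mock whose snapshot-info table always fails reads.
OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class);
Table<String, SnapshotInfo> mockedSnapshotInfoTable = mock(Table.class);
when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error."));
when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable);
when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager);

// The purge request should then report INTERNAL_ERROR and bump only the failure metric.
OMSnapshotPurgeResponse response = (OMSnapshotPurgeResponse)
    omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L);
assertEquals(INTERNAL_ERROR, response.getOMResponse().getStatus());
assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails());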
+ */ +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.util.Time; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.File; +import java.util.UUID; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getFromProtobuf; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createSnapshotRequest; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.renameSnapshotRequest; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RenameSnapshot; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests OMSnapshotRenameRequest class, which handles RenameSnapshot request. 
+ */ +public class TestOMSnapshotRenameRequest { + + @TempDir + private File anotherTempDir; + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + private BatchOperation batchOperation; + + private String volumeName; + private String bucketName; + private String snapshotName1; + private String snapshotName2; + + @BeforeEach + public void setup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + anotherTempDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + anotherTempDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(false); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + } + + @AfterEach + public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + if (batchOperation != null) { + batchOperation.close(); + } + } + + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. + "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecute(String toSnapshotName) throws Exception { + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + + String currentSnapshotName = "current"; + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, currentSnapshotName, toSnapshotName); + doPreExecute(omRequest); + } + + @ValueSource(strings = { + // ? is not allowed in snapshot name. + "a?b", + // only numeric name not allowed. + "1234", + // less than 3 chars are not allowed. + "s1", + // more than or equal to 64 chars are not allowed. + "snap156808943643007724443266605711479126926050896107709081166294", + // Underscore is not allowed. + "snap_1", + // CamelCase is not allowed. 
+ "NewSnapshot" + }) + @ParameterizedTest + public void testPreExecuteFailure(String toSnapshotName) { + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + String currentSnapshotName = "current"; + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, currentSnapshotName, toSnapshotName); + OMException omException = + assertThrows(OMException.class, () -> doPreExecute(omRequest)); + assertEquals("Invalid snapshot name: " + toSnapshotName, + omException.getMessage()); + } + + @Test + public void testPreExecuteBadOwner() { + // Owner is not set for the request. + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, snapshotName1, snapshotName2); + + OMException omException = assertThrows(OMException.class, + () -> doPreExecute(omRequest)); + assertEquals("Only bucket owners and Ozone admins can rename snapshots", + omException.getMessage()); + } + + @Test + public void testValidateAndUpdateCache() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + String key = getTableKey(volumeName, bucketName, snapshotName1); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + + // Add a 1000-byte key to the bucket + OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); + addKeyToTable(key1); + + OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + bucketKey); + long bucketDataSize = key1.getDataSize(); + long bucketUsedBytes = omBucketInfo.getUsedBytes(); + assertEquals(key1.getReplicatedSize(), bucketUsedBytes); + + // Value in cache should be null as of now. + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + + // Add key to cache. + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotName1, UUID.randomUUID(), Time.now()); + snapshotInfo.setReferencedSize(1000L); + snapshotInfo.setReferencedReplicatedSize(3 * 1000L); + assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(key), + CacheValue.get(1L, snapshotInfo)); + + // Run validateAndUpdateCache. + OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 2L); + + assertNotNull(omClientResponse.getOMResponse()); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(RenameSnapshot, omResponse.getCmdType()); + assertEquals(OK, omResponse.getStatus()); + + // verify table data with response data. 
+ OzoneManagerProtocolProtos.SnapshotInfo snapshotInfoProto = + omClientResponse + .getOMResponse() + .getRenameSnapshotResponse() + .getSnapshotInfo(); + + assertEquals(bucketDataSize, snapshotInfoProto.getReferencedSize()); + assertEquals(bucketUsedBytes, + snapshotInfoProto.getReferencedReplicatedSize()); + + SnapshotInfo snapshotInfoOldProto = getFromProtobuf(snapshotInfoProto); + + String key2 = getTableKey(volumeName, bucketName, snapshotName2); + + // Get value from cache + SnapshotInfo snapshotInfoNewInCache = + omMetadataManager.getSnapshotInfoTable().get(key2); + assertNotNull(snapshotInfoNewInCache); + assertEquals(snapshotInfoOldProto, snapshotInfoNewInCache); + assertEquals(snapshotInfo.getSnapshotId(), snapshotInfoNewInCache.getSnapshotId()); + + SnapshotInfo snapshotInfoOldInCache = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNull(snapshotInfoOldInCache); + } + + @Test + public void testEntryExists() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + + String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); + String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // First make sure we have two snapshots. + OzoneManagerProtocolProtos.OMRequest createOmRequest = + createSnapshotRequest(volumeName, bucketName, snapshotName1); + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + + createOmRequest = + createSnapshotRequest(volumeName, bucketName, snapshotName2); + omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // Now try renaming and get an error. + OzoneManagerProtocolProtos.OMRequest omRequest = + renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + + OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS, + omResponse.getStatus()); + } + + @Test + public void testEntryNotFound() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + + String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); + String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // Now try renaming and get an error. 
+ OzoneManagerProtocolProtos.OMRequest omRequest = + renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + + OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.FILE_NOT_FOUND, + omResponse.getStatus()); + } + + private OMSnapshotRenameRequest doPreExecute( + OzoneManagerProtocolProtos.OMRequest originalRequest) throws Exception { + return doPreExecute(originalRequest, ozoneManager); + } + + public static OMSnapshotRenameRequest doPreExecute( + OzoneManagerProtocolProtos.OMRequest originalRequest, OzoneManager ozoneManager) throws Exception { + OMSnapshotRenameRequest omSnapshotRenameRequest = + new OMSnapshotRenameRequest(originalRequest); + + OzoneManagerProtocolProtos.OMRequest modifiedRequest = + omSnapshotRenameRequest.preExecute(ozoneManager); + return new OMSnapshotRenameRequest(modifiedRequest); + } + + private OmKeyInfo addKey(String keyName, long objectId) { + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) + .build(); + } + + protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { + OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, + omMetadataManager); + return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + keyInfo.getBucketName(), keyInfo.getKeyName()); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index 3856a5b62f5f..b5bfc2714b0f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -48,6 +49,7 @@ import java.util.List; import java.util.UUID; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.Mockito.anyString; @@ -62,7 +64,7 @@ public class TestOMSnapshotSetPropertyRequestAndResponse { private BatchOperation batchOperation; private OzoneManager ozoneManager; private OMMetadataManager omMetadataManager; - + private OMMetrics omMetrics; private String volumeName; private String bucketName; private String snapName; @@ -71,6 +73,7 @@ public class TestOMSnapshotSetPropertyRequestAndResponse { @BeforeEach void setup(@TempDir File 
testDir) throws Exception { + omMetrics = OMMetrics.create(); ozoneManager = mock(OzoneManager.class); OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); when(lvm.isAllowed(anyString())).thenReturn(true); @@ -84,6 +87,7 @@ void setup(@TempDir File testDir) throws Exception { omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); @@ -94,6 +98,9 @@ void setup(@TempDir File testDir) throws Exception { @Test public void testValidateAndUpdateCache() throws IOException { + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + createSnapshotDataForTest(); assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = @@ -120,6 +127,9 @@ public void testValidateAndUpdateCache() throws IOException { omMetadataManager.getStore().commitBatchOperation(batchOperation); } + assertEquals(initialSnapshotSetPropertyCount + snapshotUpdateSizeRequests.size(), + omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount, omMetrics.getNumSnapshotSetPropertyFails()); // Check if the exclusive size is set. try (TableIterator> iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { @@ -134,6 +144,42 @@ public void testValidateAndUpdateCache() throws IOException { } } + /** + * This test is mainly to validate metrics and error code. + */ + @Test + public void testValidateAndUpdateCacheFailure() throws IOException { + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + + createSnapshotDataForTest(); + assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); + + OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); + Table mockedSnapshotInfoTable = mock(Table.class); + + when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); + when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + + for (OMRequest omRequest: snapshotUpdateSizeRequests) { + OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(omRequest); + OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(ozoneManager); + omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); + + // Validate and Update Cache + OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) + omSnapshotSetPropertyRequest.validateAndUpdateCache(ozoneManager, 200L); + + assertEquals(INTERNAL_ERROR, omSnapshotSetPropertyResponse.getOMResponse().getStatus()); + } + + assertEquals(initialSnapshotSetPropertyCount, omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount + snapshotUpdateSizeRequests.size(), + omMetrics.getNumSnapshotSetPropertyFails()); + } + private void assertCacheValues(String dbKey) { CacheValue cacheValue = omMetadataManager .getSnapshotInfoTable() diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java index 811e13ac173e..7d6487493861 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java @@ -20,6 +20,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.Table; @@ -82,7 +84,7 @@ public void testAddToDBBatch() throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).build(); ThreadLocalRandom random = ThreadLocalRandom.current(); long usedNamespace = Math.abs(random.nextLong(Long.MAX_VALUE)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java index c7e2c265b7bb..c639c77c08e3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java @@ -26,7 +26,6 @@ import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -41,11 +40,11 @@ public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, - omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java index e5a6b0ab14f5..88ef2964d17e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java @@ -92,7 +92,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { protected OmKeyInfo createOmKeyInfo() throws Exception { 
return OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); + bucketName, keyName, replicationConfig).build(); } protected String getOpenKey() throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java index 85e9354ca8c9..b574b8548132 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java @@ -18,18 +18,19 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + /** * Tests OMAllocateBlockResponse - prefix layout. */ @@ -49,12 +50,11 @@ protected OmKeyInfo createOmKeyInfo() throws Exception { long txnId = 50; long objectId = parentID + 1; - OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now()); - return omKeyInfoFSO; + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java index 25b2f6c1050f..762401bc57bf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java @@ -81,7 +81,7 @@ public void testAddToDBBatch() throws Exception { public void testAddToDBBatchNoOp() throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); + bucketName, keyName, replicationConfig).build(); OzoneManagerProtocolProtos.OMResponse omResponse = OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse( @@ -135,7 +135,7 @@ public void testAddToDBBatchOnOverwrite() throws Exception { @Nonnull protected void addKeyToOpenKeyTable() throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); + clientID, replicationConfig, omMetadataManager); } @Nonnull @@ -158,6 +158,6 @@ protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo, new RepeatedOmKeyInfo(e))); } return new 
OMKeyCommitResponse(omResponse, omKeyInfo, ozoneKey, openKey, - omBucketInfo, deleteKeyMap, isHSync, newOpenKeyInfo); + omBucketInfo, deleteKeyMap, isHSync, newOpenKeyInfo, null, null); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java index 9c3f8c1143e3..e3d88400396c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java @@ -18,17 +18,19 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; + import java.util.HashMap; import java.util.Map; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -55,18 +57,18 @@ protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo, new RepeatedOmKeyInfo(e))); } return new OMKeyCommitResponseWithFSO(omResponse, omKeyInfo, ozoneKey, - openKey, omBucketInfo, deleteKeyMap, volumeId, isHSync, newOpenKeyInfo); + openKey, omBucketInfo, deleteKeyMap, volumeId, isHSync, newOpenKeyInfo, null, null); } @Nonnull @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, - omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull @@ -77,11 +79,11 @@ protected void addKeyToOpenKeyTable() throws Exception { long objectId = parentID + 10; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, - Time.now()); - + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(100L) + .build(); String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java index ee83f3671277..53d86e667367 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java @@ -18,13 +18,15 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -50,11 +52,12 @@ protected String getOpenKeyName() throws IOException { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, - omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index a000c3f9694e..8031ead68f18 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -22,7 +22,6 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.utils.db.Table; @@ -89,8 +88,7 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setReplicationConfig(RatisReplicationConfig - .getInstance(replicationFactor)) + .setReplicationConfig(replicationConfig) .setNodes(new ArrayList<>()) .build(); @@ -167,7 +165,7 @@ protected String addKeyToTable() throws Exception { keyName); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); + clientID, replicationConfig, omMetadataManager); return ozoneKey; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java index 588907c6ce88..38f5438e9877 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java 
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java @@ -18,13 +18,14 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; /** * Tests OMKeyDeleteResponse - prefix layout. @@ -50,11 +51,11 @@ protected String addKeyToTable() throws Exception { bucketName, "", omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); return omMetadataManager.getOzonePathKey( @@ -66,11 +67,12 @@ protected String addKeyToTable() throws Exception { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(getOmBucketInfo()); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - getOmBucketInfo().getBucketName(), keyName, replicationType, - replicationFactor, - getOmBucketInfo().getObjectID() + 1, - getOmBucketInfo().getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, + replicationConfig) + .setObjectID(getOmBucketInfo().getObjectID() + 1) + .setParentObjectID(getOmBucketInfo().getObjectID()) + .setUpdateID(100L) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java index 2dcef56330f2..07c094cc98a1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java @@ -154,12 +154,10 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { } protected OmKeyInfo getOmKeyInfo(String keyName) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, 0L); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build(); } - protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, - String keyName) { + protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) { return getOmKeyInfo(keyName); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java index f2f9ccaf872e..edbb50d66f86 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java @@ -18,17 +18,17 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; -import org.apache.hadoop.util.Time; import java.io.IOException; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; /** @@ -38,19 +38,21 @@ public class TestOMKeyRenameResponseWithFSO extends TestOMKeyRenameResponse { @Override protected OmKeyInfo getOmKeyInfo(String keyName) { long bucketId = random.nextLong(); - return OMRequestTestUtils.createOmKeyInfo( - volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - bucketId + 100L, bucketId + 101L, 0L, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(bucketId + 100) + .setParentObjectID(bucketId + 101) + .build(); } @Override protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) { - return OMRequestTestUtils.createOmKeyInfo(toKeyInfo.getVolumeName(), - toKeyInfo.getBucketName(), keyName, replicationType, - replicationFactor, toKeyInfo.getObjectID(), - toKeyInfo.getParentObjectID(), 0L, toKeyInfo.getCreationTime()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(toKeyInfo.getObjectID()) + .setParentObjectID(toKeyInfo.getParentObjectID()) + .setUpdateID(0L) + .setCreationTime(toKeyInfo.getCreationTime()) + .build(); } @Override @@ -80,12 +82,12 @@ protected void createParent() { long bucketId = random.nextLong(); String fromKeyParentName = UUID.randomUUID().toString(); String toKeyParentName = UUID.randomUUID().toString(); - fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, replicationType, replicationFactor, - bucketId + 100L); - toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, toKeyParentName, replicationType, replicationFactor, - bucketId + 101L); + fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fromKeyParentName, replicationConfig) + .setObjectID(bucketId + 100L) + .build(); + toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, toKeyParentName, replicationConfig) + .setObjectID(bucketId + 101L) + .build(); fromKeyParent.setParentObjectID(bucketId); toKeyParent.setParentObjectID(bucketId); fromKeyParent.setFileName(OzoneFSUtils.getFileName( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java index 1cbf5c6d0b2d..bc4c34bd0db3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java @@ -24,6 +24,7 @@ import java.util.Random; import java.util.UUID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -59,8 +60,7 @@ public class TestOMKeyResponse { protected String volumeName; protected String bucketName; protected String keyName; - protected HddsProtos.ReplicationFactor replicationFactor; - protected HddsProtos.ReplicationType replicationType; + protected ReplicationConfig replicationConfig; protected OmBucketInfo omBucketInfo; protected long clientID; protected Random random; @@ -78,18 +78,18 @@ public void setup() throws Exception { volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); keyName = UUID.randomUUID().toString(); - replicationFactor = HddsProtos.ReplicationFactor.ONE; - replicationType = HddsProtos.ReplicationType.RATIS; + replicationConfig = ReplicationConfig.fromProtoTypeAndFactor( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); clientID = 1000L; random = new Random(); keysToDelete = null; final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder() - .setVolume(volumeName) - .setAdminName("admin") - .setOwnerName("owner") - .setObjectID(System.currentTimeMillis()) - .build(); + .setVolume(volumeName) + .setAdminName("admin") + .setOwnerName("owner") + .setObjectID(System.currentTimeMillis()) + .build(); omMetadataManager.getVolumeTable().addCacheEntry( new CacheKey<>(omMetadataManager.getVolumeKey(volumeName)), @@ -117,8 +117,7 @@ protected String getOpenKeyName() throws IOException { @Nonnull protected OmKeyInfo getOmKeyInfo() { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java index 60f371ba1f88..e7689e52ed7d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; @@ -32,7 +33,6 @@ import java.util.List; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; @@ -64,7 +64,7 @@ protected void createPreRequisities() throws Exception { for (int i = 0; i < 10; i++) { keyName = parent.concat(key + i); OMRequestTestUtils.addKeyToTable(false, 
volumeName, - bucketName, keyName, 0L, RATIS, THREE, omMetadataManager); + bucketName, keyName, 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager); ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); omKeyInfoList .add(omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey)); @@ -104,7 +104,7 @@ public void testKeysDeleteResponse() throws Exception { protected OMClientResponse getOmKeysDeleteResponse(OMResponse omResponse, OmBucketInfo omBucketInfo) { return new OMKeysDeleteResponse( - omResponse, omKeyInfoList, true, omBucketInfo, Collections.emptyList()); + omResponse, omKeyInfoList, true, omBucketInfo, Collections.emptyMap()); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java index 148a4e28c1b2..13e706757f92 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -41,6 +40,7 @@ import java.util.Collections; import java.util.List; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -94,10 +94,11 @@ protected void createPreRequisities() throws Exception { keyName = keyPrefix + i; OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, dirId + 1, buckId, - dirId + 1, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(dirId + 1) + .setParentObjectID(buckId) + .setUpdateID(dirId + 1) + .build(); ozoneDBKey = OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); @@ -111,7 +112,7 @@ protected OMClientResponse getOmKeysDeleteResponse(OMResponse omResponse, OmBucketInfo omBucketInfo) { return new OMKeysDeleteResponseWithFSO( omResponse, getOmKeyInfoList(), dirDeleteList, true, omBucketInfo, - volId, Collections.emptyList()); + volId, Collections.emptyMap()); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java index 
0824f7c33de7..72a76a1aca4f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -32,7 +33,6 @@ import java.util.Map; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -117,7 +117,8 @@ private void createPreRequisities() throws Exception { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); OMRequestTestUtils.addKeyToTable(false, volumeName, - bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE, + bucketName, parentDir.concat("/key" + i), 0L, + RatisReplicationConfig.getInstance(THREE), omMetadataManager); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java index f4f0e729f05d..c9a4109809ed 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java @@ -208,7 +208,7 @@ private Map addOpenKeysToDB(String volume, int numKeys, long parentID = random.nextLong(); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, key, replicationType, replicationFactor); + bucket, key, replicationConfig).build(); if (keyLength > 0) { OMRequestTestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java index b12087785b1f..5ebd2e6fa1cb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java @@ -17,8 +17,12 @@ package org.apache.hadoop.ozone.om.response.key.acl.prefix; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.PrefixManagerImpl; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -36,6 +40,8 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Tests TestOMPrefixAclResponse. @@ -45,9 +51,9 @@ public class TestOMPrefixAclResponse extends TestOMKeyResponse { @Test public void testAddToDBBatch() throws Exception { final OzoneAcl user1 = new OzoneAcl(USER, "user1", - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); final OzoneAcl user2 = new OzoneAcl(USER, "user2", - ACLType.WRITE, ACCESS); + ACCESS, ACLType.WRITE); final String prefixName = "/vol/buck/prefix/"; List acls = Arrays.asList(user1, user2); @@ -77,13 +83,22 @@ public void testAddToDBBatch() throws Exception { .getSkipCache(prefixName); assertEquals(omPrefixInfo, persistedPrefixInfo); + String volumeName = "vol"; + String bucketName = "buck"; + + OzoneManager ozoneManager = mock(OzoneManager.class); + when(ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName))) + .thenReturn(new ResolvedBucket(volumeName, bucketName, volumeName, + bucketName, "", BucketLayout.DEFAULT)); + + // Verify that in-memory Prefix Tree (Radix Tree) is able to reload from // DB successfully PrefixManagerImpl prefixManager = - new PrefixManagerImpl(omMetadataManager, true); + new PrefixManagerImpl(ozoneManager, omMetadataManager, true); OzoneObj prefixObj = OzoneObjInfo.Builder.newBuilder() - .setVolumeName("vol") - .setBucketName("buck") + .setVolumeName(volumeName) + .setBucketName(bucketName) .setPrefixName("prefix/") .setResType(OzoneObj.ResourceType.PREFIX) .setStoreType(OzoneObj.StoreType.OZONE) @@ -123,7 +138,7 @@ public void testAddToDBBatch() throws Exception { // Reload prefix tree from DB and validate again. prefixManager = - new PrefixManagerImpl(omMetadataManager, true); + new PrefixManagerImpl(ozoneManager, omMetadataManager, true); prefixInfo = prefixManager.getPrefixInfo(prefixObj); assertEquals(2L, prefixInfo.getUpdateID()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java index b356dddd6b57..35600c331f3f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java @@ -19,15 +19,19 @@ package org.apache.hadoop.ozone.om.response.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartAbortInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import 
org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; @@ -277,10 +281,10 @@ private Map> addMPUsToDB( OmBucketInfo omBucketInfo = OMRequestTestUtils.addBucketToDB(volume, bucket, omMetadataManager, getBucketLayout()); - final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, Time.now(), true); + ReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(ONE); + final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, replicationConfig, + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .build(); if (getBucketLayout().equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { omKeyInfo.setParentObjectID(omBucketInfo.getObjectID()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java index 23b543b6ec12..a2192ddb880c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java @@ -287,7 +287,7 @@ public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseFSO( .setStatus(status).setSuccess(true) .setCommitMultiPartUploadResponse( OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse - .newBuilder().setPartName(volumeName)).build(); + .newBuilder().setETag(volumeName).setPartName(volumeName)).build(); return new S3MultipartUploadCommitPartResponseWithFSO(omResponse, multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, @@ -306,6 +306,8 @@ public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO( String multipartKey = omMetadataManager .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID); + OmMultipartKeyInfo multipartKeyInfo = omMetadataManager + .getMultipartInfoTable().get(multipartKey); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omMetadataManager.getBucketId(volumeName, @@ -324,7 +326,8 @@ public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO( return new S3MultipartUploadCompleteResponseWithFSO(omResponse, multipartKey, multipartOpenKey, omKeyInfo, allKeyInfoToRemove, - getBucketLayout(), omBucketInfo, volumeId, bucketId); + getBucketLayout(), omBucketInfo, volumeId, bucketId, null, + multipartKeyInfo); } protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java index 47aa641c1ebb..e7a570350cff 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java @@ -18,14 +18,17 @@ package org.apache.hadoop.ozone.om.response.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -85,14 +88,16 @@ public void testAddDBToBatch() throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); String dbOpenKey = omMetadataManager.getOpenFileName(volumeId, bucketId, - parentID, fileName, clientId); + parentID, fileName, clientId); String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, - parentID, fileName); + parentID, fileName); OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable omKeyInfoFSO.setKeyName(fileName); @@ -175,9 +180,11 @@ public void testAddDBToBatchWithNullBucketInfo() throws Exception { parentID, fileName); OmKeyInfo omKeyInfoFSO = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable omKeyInfoFSO.setKeyName(fileName); @@ -244,20 +251,20 @@ public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception { String keyName = getKeyName(); OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); + omMetadataManager); createParentPath(volumeName, bucketName); // Put an entry to delete table with the same key prior to multipart commit - OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentID + 8, - parentID, 8, Time.now(), true); + OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 8) + .setParentObjectID(parentID) + .setUpdateID(8) + .build(); RepeatedOmKeyInfo prevKeys = new RepeatedOmKeyInfo(prevKey); String ozoneKey = omMetadataManager - .getOzoneKey(prevKey.getVolumeName(), - prevKey.getBucketName(), prevKey.getFileName()); + .getOzoneKey(prevKey.getVolumeName(), + prevKey.getBucketName(), prevKey.getFileName()); omMetadataManager.getDeletedTable().put(ozoneKey, prevKeys); long oId = runAddDBToBatchWithParts(volumeName, bucketName, keyName, 1); @@ -312,11 +319,12 @@ private long runAddDBToBatchWithParts(String volumeName, 
omMetadataManager.getBucketTable().get(bucketKey); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentID + 9, - parentID, 100, Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 9) + .setParentObjectID(parentID) + .setUpdateID(100) + .build(); List unUsedParts = new ArrayList<>(); unUsedParts.add(omKeyInfo); S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java index 9ae0a395e906..70dd23a7b047 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java @@ -36,12 +36,11 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import java.io.IOException; import java.util.UUID; import java.nio.file.Path; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.fail; /** * This class tests OMVolumeCreateResponse. @@ -115,7 +114,7 @@ public void testAddToDBBatch() throws Exception { } @Test - public void testAddToDBBatchNoOp() throws Exception { + public void testAddToDBBatchNoOp() { OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) @@ -126,15 +125,7 @@ public void testAddToDBBatchNoOp() throws Exception { OMVolumeDeleteResponse omVolumeDeleteResponse = new OMVolumeDeleteResponse( omResponse); - - try { - omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - + assertDoesNotThrow(() -> omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation)); } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java index c8a3faae4cca..8dcb030d637a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java @@ -23,9 +23,10 @@ import java.io.IOException; import java.nio.file.Path; import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; import org.apache.hadoop.ozone.om.KeyManager; @@ -47,6 +48,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -129,10 +131,11 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception { for (int i = 0; i < 2000; ++i) { String keyName = "key" + longName + i; OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, - keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, dir1.getObjectID() + 1 + i, - dir1.getObjectID(), 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(dir1.getObjectID() + 1 + i) + .setParentObjectID(dir1.getObjectID()) + .setUpdateID(100L) + .build(); OMRequestTestUtils.addFileToKeyTable(false, true, keyName, omKeyInfo, 1234L, i + 1, om.getMetadataManager()); } @@ -143,7 +146,7 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception { .setBucketName(bucketName) .setKeyName("dir" + longName) .setReplicationConfig(StandaloneReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) + ONE)) .setDataSize(0).setRecursive(true) .build(); writeClient.deleteKey(delArgs); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 77bf15ed76b1..cf538f581c78 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -31,12 +31,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import com.google.common.collect.ImmutableMap; +import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -52,7 +53,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.test.OzoneTestBase; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; @@ -82,12 +82,11 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.fail; import 
org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -359,10 +358,9 @@ void testSnapshotDeepClean() throws Exception { keyDeletingService.resume(); - try (ReferenceCounted rcOmSnapshot = - om.getOmSnapshotManager().checkForSnapshot( - volumeName, bucketName, getSnapshotPrefix(snap3), true)) { - OmSnapshot snapshot3 = (OmSnapshot) rcOmSnapshot.get(); + try (ReferenceCounted rcOmSnapshot = + om.getOmSnapshotManager().getSnapshot(volumeName, bucketName, snap3)) { + OmSnapshot snapshot3 = rcOmSnapshot.get(); Table snap3deletedTable = snapshot3.getMetadataManager().getDeletedTable(); @@ -630,15 +628,13 @@ private static void assertTableRowCount(Table table, private static boolean assertTableRowCount(long expectedCount, Table table, OMMetadataManager metadataManager) { - long count = 0L; - try { - count = metadataManager.countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(metadataManager.countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } private void createVolumeAndBucket(String volumeName, @@ -711,6 +707,7 @@ private OmKeyArgs createAndCommitKey(String volumeName, .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1000L) .setLocationInfoList(new ArrayList<>()) + .setOwnerName("user" + RandomStringUtils.randomNumeric(5)) .build(); //Open and Commit the Key in the Key Manager. OpenKeySession session = writeClient.openKey(keyArg); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java index 762d8740565f..37c90c3767cd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java @@ -19,11 +19,13 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmTestManagers; @@ -37,6 +39,7 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -210,6 +213,7 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setAcls(Collections.emptyList()) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setLocationInfoList(new ArrayList<>()) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) 
.build(); OmMultipartInfo omMultipartInfo = writeClient. @@ -228,6 +232,7 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setAcls(Collections.emptyList()) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); OpenKeySession openKey = writeClient.openKey(partKeyArgs); @@ -241,6 +246,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setMultipartUploadID(omMultipartInfo.getUploadID()) .setMultipartUploadPartNumber(i) .setAcls(Collections.emptyList()) + .addMetadata(OzoneConsts.ETAG, + DigestUtils.md5Hex(UUID.randomUUID().toString())) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) .setLocationInfoList(Collections.emptyList()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index 30fe6f6ffb0e..eeb6f2c71ead 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -34,6 +35,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.ExpiredOpenKeys; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -49,6 +51,7 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; @@ -169,8 +172,8 @@ public void testCleanupExpiredOpenKeys( long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned(); long numOpenKeysHSyncCleaned = metrics.getNumOpenKeysHSyncCleaned(); final int keyCount = numDEFKeys + numFSOKeys; - createOpenKeys(numDEFKeys, false, BucketLayout.DEFAULT, false); - createOpenKeys(numFSOKeys, hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED, false); + createOpenKeys(numDEFKeys, false, BucketLayout.DEFAULT, false, false); + createOpenKeys(numFSOKeys, hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED, false, false); // wait for open keys to expire Thread.sleep(EXPIRE_THRESHOLD_MS); @@ -237,9 +240,9 @@ public void testIgnoreExpiredRecoverhsyncKeys() throws Exception { when(om.getScmClient().getContainerClient().getContainerWithPipeline(anyLong())) .thenReturn(new ContainerWithPipeline(Mockito.mock(ContainerInfo.class), pipeline)); - createOpenKeys(keyCount, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, false); + createOpenKeys(keyCount, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, false, false); // create 2 more key and mark recovery flag set - createOpenKeys(2, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, true); + createOpenKeys(2, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, true, false); 
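(Aside on the ETAG metadata added in the multipart-upload hunks above: each committed part now carries an MD5-hex ETag derived from a random UUID, not from the part payload. A minimal sketch of that computation, assuming only commons-codec on the classpath; the class and method names below are illustrative and not part of the patch.)

    import java.util.UUID;
    import org.apache.commons.codec.digest.DigestUtils;

    final class EtagSketch {
      private EtagSketch() { }

      // Mirrors the test's approach: hash a random UUID string to get a
      // 32-character lowercase hex value for the OzoneConsts.ETAG metadata key.
      static String randomEtag() {
        return DigestUtils.md5Hex(UUID.randomUUID().toString());
      }
    }
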
// wait for open keys to expire Thread.sleep(EXPIRE_THRESHOLD_MS); @@ -255,7 +258,51 @@ public void testIgnoreExpiredRecoverhsyncKeys() throws Exception { waitForOpenKeyCleanup(true, BucketLayout.FILE_SYSTEM_OPTIMIZED); // 2 keys should still remain in openKey table - assertEquals(2, getOpenKeyInfo(BucketLayout.FILE_SYSTEM_OPTIMIZED).size()); + assertEquals(2, getKeyInfo(BucketLayout.FILE_SYSTEM_OPTIMIZED, true).size()); + } + + @Test + public void testCommitExpiredHsyncKeys() throws Exception { + OpenKeyCleanupService openKeyCleanupService = + (OpenKeyCleanupService) keyManager.getOpenKeyCleanupService(); + + openKeyCleanupService.suspend(); + // wait for submitted tasks to complete + Thread.sleep(SERVICE_INTERVAL); + + int keyCount = 10; + Pipeline pipeline = Pipeline.newBuilder() + .setState(Pipeline.PipelineState.OPEN) + .setId(PipelineID.randomId()) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)) + .setNodes(new ArrayList<>()) + .build(); + + when(om.getScmClient().getContainerClient().getContainerWithPipeline(anyLong())) + .thenReturn(new ContainerWithPipeline(Mockito.mock(ContainerInfo.class), pipeline)); + + // Create 5 keys with directories + createOpenKeys(keyCount / 2, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, false, true); + // Create 5 keys without directory + createOpenKeys(keyCount / 2, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, false, false); + + // wait for open keys to expire + Thread.sleep(EXPIRE_THRESHOLD_MS); + + // 10 keys should be returned after hard limit period + assertEquals(keyCount, getExpiredOpenKeys(true, BucketLayout.FILE_SYSTEM_OPTIMIZED)); + assertExpiredOpenKeys(false, true, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + openKeyCleanupService.resume(); + + // keys should be recovered and there should not be any expired key pending + waitForOpenKeyCleanup(true, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + List listKeyInfo = getKeyInfo(BucketLayout.FILE_SYSTEM_OPTIMIZED, false); + // Verify keyName and fileName is same after auto commit key. + listKeyInfo.stream().forEach(key -> assertEquals(key.getKeyName(), key.getFileName())); } /** @@ -406,17 +453,20 @@ private int getExpiredOpenKeys(boolean hsync, BucketLayout layout) { } } - private List getOpenKeyInfo(BucketLayout bucketLayout) { + private List getKeyInfo(BucketLayout bucketLayout, boolean openKey) { List omKeyInfo = new ArrayList<>(); - Table openFileTable = - om.getMetadataManager().getOpenKeyTable(bucketLayout); + Table fileTable; + if (openKey) { + fileTable = om.getMetadataManager().getOpenKeyTable(bucketLayout); + } else { + fileTable = om.getMetadataManager().getKeyTable(bucketLayout); + } try (TableIterator> - iterator = openFileTable.iterator()) { + iterator = fileTable.iterator()) { while (iterator.hasNext()) { omKeyInfo.add(iterator.next().getValue()); } - } catch (Exception e) { } return omKeyInfo; @@ -430,7 +480,7 @@ void waitForOpenKeyCleanup(boolean hsync, BucketLayout layout) } private void createOpenKeys(int keyCount, boolean hsync, - BucketLayout bucketLayout, boolean recovery) throws IOException { + BucketLayout bucketLayout, boolean recovery, boolean withDir) throws IOException { String volume = UUID.randomUUID().toString(); String bucket = UUID.randomUUID().toString(); for (int x = 0; x < keyCount; x++) { @@ -440,7 +490,7 @@ private void createOpenKeys(int keyCount, boolean hsync, volume = UUID.randomUUID().toString(); } } - String key = UUID.randomUUID().toString(); + String key = withDir ? 
"dir1/dir2/" + UUID.randomUUID() : UUID.randomUUID().toString(); createVolumeAndBucket(volume, bucket, bucketLayout); final int numBlocks = RandomUtils.nextInt(1, 3); @@ -479,6 +529,8 @@ private void createOpenKey(String volumeName, String bucketName, .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(new ArrayList<>()) + .setOwnerName(UserGroupInformation.getCurrentUser() + .getShortUserName()) .build(); // Open and write the key without commit it. @@ -535,6 +587,7 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(new ArrayList<>()) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); OmMultipartInfo omMultipartInfo = writeClient. @@ -553,6 +606,7 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setAcls(Collections.emptyList()) .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); OpenKeySession openKey = writeClient.openKey(partKeyArgs); @@ -570,6 +624,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(Collections.emptyList()) + .addMetadata(OzoneConsts.ETAG, DigestUtils.md5Hex(UUID.randomUUID() + .toString())) .build(); writeClient.commitMultipartUploadPart(commitPartKeyArgs, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java index 5ac7835f8ce6..1a0db1183311 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java @@ -19,8 +19,11 @@ package org.apache.hadoop.ozone.om.service; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.junit.jupiter.api.Assertions.assertEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -48,8 +51,7 @@ public void testQuotaRepair() throws Exception { String parentDir = "/user"; for (int i = 0; i < count; i++) { OMRequestTestUtils.addKeyToTableAndCache(volumeName, bucketName, - parentDir.concat("/key" + i), -1, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, 150 + i, omMetadataManager); + parentDir.concat("/key" + i), -1, RatisReplicationConfig.getInstance(THREE), 150 + i, omMetadataManager); } String fsoBucketName = "fso" + bucketName; @@ -59,12 +61,13 @@ public void testQuotaRepair() throws Exception { fsoBucketName, "c/d/e", omMetadataManager); for (int i = 0; i < count; i++) { String fileName = "file1" + i; - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo( - volumeName, fsoBucketName, fileName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1 + i, - parentId, 100 
+ i, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, fsoBucketName, fileName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1 + i) + .setParentObjectID(parentId) + .setUpdateID(100L + i) + .build(); omKeyInfo.setKeyName(fileName); OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50 + i, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java index cecd7a99af2b..8546690fa0ae 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java @@ -18,9 +18,8 @@ package org.apache.hadoop.ozone.om.snapshot; import com.google.common.cache.CacheLoader; -import org.apache.hadoop.ozone.om.IOmMetadataReader; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -33,15 +32,15 @@ import org.slf4j.event.Level; import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -52,23 +51,22 @@ class TestSnapshotCache { private static final int CACHE_SIZE_LIMIT = 3; - private static OmSnapshotManager omSnapshotManager; - private static CacheLoader cacheLoader; + private static CacheLoader cacheLoader; private SnapshotCache snapshotCache; + private OMMetrics omMetrics; + @BeforeAll static void beforeAll() throws Exception { - omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.isSnapshotStatus(any(), eq(SNAPSHOT_ACTIVE))) - .thenReturn(true); cacheLoader = mock(CacheLoader.class); // Create a difference mock OmSnapshot instance each time load() is called when(cacheLoader.load(any())).thenAnswer( (Answer) invocation -> { final OmSnapshot omSnapshot = mock(OmSnapshot.class); // Mock the snapshotTable return value for the lookup inside release() - final String dbKey = (String) invocation.getArguments()[0]; - when(omSnapshot.getSnapshotTableKey()).thenReturn(dbKey); + final UUID snapshotID = (UUID) invocation.getArguments()[0]; + when(omSnapshot.getSnapshotTableKey()).thenReturn(snapshotID.toString()); + when(omSnapshot.getSnapshotID()).thenReturn(snapshotID); return omSnapshot; } @@ -81,8 +79,8 @@ static void beforeAll() throws Exception { @BeforeEach void setUp() { // Reset cache for each test case - snapshotCache = new SnapshotCache( - omSnapshotManager, cacheLoader, CACHE_SIZE_LIMIT); + omMetrics = OMMetrics.create(); + snapshotCache = new SnapshotCache(cacheLoader, CACHE_SIZE_LIMIT, omMetrics, 50); } @AfterEach @@ -92,115 +90,108 @@ void tearDown() { } @Test - 
@DisplayName("01. get()") + @DisplayName("get()") void testGet() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); + ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertNotNull(omSnapshot.get()); assertInstanceOf(OmSnapshot.class, omSnapshot.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test - @DisplayName("02. get() same entry twice yields one cache entry only") + @DisplayName("get() same entry twice yields one cache entry only") void testGetTwice() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - ReferenceCounted omSnapshot1again = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot1again = snapshotCache.get(dbKey1); // Should be the same instance assertEquals(omSnapshot1, omSnapshot1again); assertEquals(omSnapshot1.get(), omSnapshot1again.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test - @DisplayName("03. release(String)") + @DisplayName("release(String)") void testReleaseByDbKey() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertNotNull(omSnapshot1.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test - @DisplayName("04. release(OmSnapshot)") - void testReleaseByOmSnapshotInstance() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); - assertNotNull(omSnapshot1); - assertEquals(1, snapshotCache.size()); - - snapshotCache.release((OmSnapshot) omSnapshot1.get()); - // Entry will not be immediately evicted - assertEquals(1, snapshotCache.size()); - } - - @Test - @DisplayName("05. invalidate()") + @DisplayName("invalidate()") void testInvalidate() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidate(dbKey1); assertEquals(0, snapshotCache.size()); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); } @Test - @DisplayName("06. 
invalidateAll()") + @DisplayName("invalidateAll()") void testInvalidateAll() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - final String dbKey2 = "dbKey2"; - ReferenceCounted omSnapshot2 = - snapshotCache.get(dbKey2); + final UUID dbKey2 = UUID.randomUUID(); + ReferenceCounted omSnapshot2 = snapshotCache.get(dbKey2); assertNotNull(omSnapshot2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); // Should be difference omSnapshot instances assertNotEquals(omSnapshot1, omSnapshot2); - final String dbKey3 = "dbKey3"; - ReferenceCounted omSnapshot3 = - snapshotCache.get(dbKey3); + final UUID dbKey3 = UUID.randomUUID(); + ReferenceCounted omSnapshot3 = snapshotCache.get(dbKey3); assertNotNull(omSnapshot3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidate(dbKey1); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidateAll(); assertEquals(0, snapshotCache.size()); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); } - private void assertEntryExistence(String key, boolean shouldExist) { + private void assertEntryExistence(UUID key, boolean shouldExist) { if (shouldExist) { snapshotCache.getDbMap().computeIfAbsent(key, k -> { fail(k + " should not have been evicted"); @@ -215,108 +206,132 @@ private void assertEntryExistence(String key, boolean shouldExist) { } @Test - @DisplayName("07. Basic cache eviction") - void testEviction1() throws IOException { + @DisplayName("Basic cache eviction") + void testEviction1() throws IOException, InterruptedException, TimeoutException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); snapshotCache.get(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - final String dbKey2 = "dbKey2"; + final UUID dbKey2 = UUID.randomUUID(); snapshotCache.get(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); - final String dbKey3 = "dbKey3"; + final UUID dbKey3 = UUID.randomUUID(); snapshotCache.get(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); - final String dbKey4 = "dbKey4"; + final UUID dbKey4 = UUID.randomUUID(); snapshotCache.get(dbKey4); - // dbKey1, dbKey2 and dbKey3 would have been evicted by the end of the last get() because - // those were release()d. + // dbKey1, dbKey2 and dbKey3 would have been evicted by the end of the last scheduled cleanup() because + // those were released. 
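(The eviction comments above reflect that cache cleanup now runs on a schedule instead of synchronously inside get(), so the assertions that follow poll for the cache to shrink, as in the waitFor call added just below. A hedged sketch of that polling idiom, reusing the GenericTestUtils.waitFor helper and the UUID-keyed SnapshotCache already imported by the test; the helper class and method names are illustrative.)

    import java.util.concurrent.TimeoutException;
    import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
    import org.apache.ozone.test.GenericTestUtils;

    final class CacheWaitSketch {
      private CacheWaitSketch() { }

      // Poll every 50 ms and give up after 3 s, matching the intervals used in the test.
      static void awaitCacheSize(SnapshotCache cache, int expected)
          throws TimeoutException, InterruptedException {
        GenericTestUtils.waitFor(() -> cache.size() == expected, 50, 3000);
      }
    }
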
+ GenericTestUtils.waitFor(() -> snapshotCache.size() == 1, 50, 3000); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey1, false); } @Test - @DisplayName("08. Cache eviction while exceeding soft limit") - void testEviction2() throws IOException { + @DisplayName("Cache eviction while exceeding soft limit") + void testEviction2() throws IOException, InterruptedException, TimeoutException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); snapshotCache.get(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - final String dbKey2 = "dbKey2"; + final UUID dbKey2 = UUID.randomUUID(); snapshotCache.get(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); - final String dbKey3 = "dbKey3"; + final UUID dbKey3 = UUID.randomUUID(); snapshotCache.get(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); - final String dbKey4 = "dbKey4"; + final UUID dbKey4 = UUID.randomUUID(); snapshotCache.get(dbKey4); // dbKey1 would not have been evicted because it is not release()d assertEquals(4, snapshotCache.size()); + assertEquals(4, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey1, true); // Releasing dbKey2 at this point should immediately trigger its eviction // because the cache size exceeded the soft limit snapshotCache.release(dbKey2); + GenericTestUtils.waitFor(() -> snapshotCache.size() == 3, 50, 3000); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey2, false); assertEntryExistence(dbKey1, true); } @Test - @DisplayName("09. Cache eviction with try-with-resources") - void testEviction3WithClose() throws IOException { + @DisplayName("Cache eviction with try-with-resources") + void testEviction3WithClose() throws IOException, InterruptedException, TimeoutException { - final String dbKey1 = "dbKey1"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey1)) { + final UUID dbKey1 = UUID.randomUUID(); + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey1)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } // ref count should have been decreased because it would be close()d // upon exiting try-with-resources. 
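(The comment above captures the contract these assertions rely on: the handle returned by the cache decrements its reference count when closed, so try-with-resources releases exactly one reference. A simplified, hypothetical wrapper illustrating that contract; it is not the Ozone ReferenceCounted implementation, which additionally lets the cache schedule cleanup of fully released entries.)

    import java.util.concurrent.atomic.AtomicLong;

    final class CountedHandle<T> implements AutoCloseable {
      private final T value;
      private final AtomicLong refCount = new AtomicLong();

      CountedHandle(T value) {
        this.value = value;
      }

      // The cache's get() would call this before handing the handle out.
      CountedHandle<T> retain() {
        refCount.incrementAndGet();
        return this;
      }

      T get() {
        return value;
      }

      long getTotalRefCount() {
        return refCount.get();
      }

      @Override
      public void close() {
        // Exiting try-with-resources releases the reference taken by retain().
        refCount.decrementAndGet();
      }
    }
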
assertEquals(0L, snapshotCache.getDbMap().get(dbKey1).getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - final String dbKey2 = "dbKey2"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey2)) { + final UUID dbKey2 = UUID.randomUUID(); + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey2)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); // Get dbKey2 entry a second time - try (ReferenceCounted rcOmSnapshot2 = snapshotCache.get(dbKey2)) { + try (ReferenceCounted rcOmSnapshot2 = snapshotCache.get(dbKey2)) { assertEquals(2L, rcOmSnapshot.getTotalRefCount()); assertEquals(2L, rcOmSnapshot2.getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); } assertEquals(1L, rcOmSnapshot.getTotalRefCount()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey2).getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); - final String dbKey3 = "dbKey3"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey3)) { + final UUID dbKey3 = UUID.randomUUID(); + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey3)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey3).getTotalRefCount()); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); - final String dbKey4 = "dbKey4"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey4)) { + final UUID dbKey4 = UUID.randomUUID(); + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey4)) { + GenericTestUtils.waitFor(() -> snapshotCache.size() == 1, 50, 3000); assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey4).getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java similarity index 98% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index 1821b6f9af32..c5ae809718e7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -15,11 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.BeforeEach; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index b92546c2899b..1f2380f6fa77 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -21,12 +21,10 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; @@ -35,11 +33,9 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -68,7 +64,6 @@ import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ozone.rocksdiff.RocksDiffUtils; -import org.apache.ozone.test.tag.Unhealthy; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.TimeDuration; import jakarta.annotation.Nonnull; @@ -134,10 +129,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; import static org.apache.hadoop.ozone.om.OmSnapshotManager.SNAP_DIFF_JOB_TABLE_NAME; import 
static org.apache.hadoop.ozone.om.OmSnapshotManager.SNAP_DIFF_REPORT_TABLE_NAME; @@ -217,9 +208,6 @@ public class TestSnapshotDiffManager { private OzoneManager ozoneManager; @Mock private OzoneConfiguration configuration; - - private SnapshotCache snapshotCache; - @Mock private Table snapshotInfoTable; @Mock @@ -328,8 +316,8 @@ public void init() throws RocksDBException, IOException, ExecutionException { OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT, TimeUnit.MILLISECONDS)) .thenReturn(OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT); - when(configuration - .getBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, + when(configuration. + getBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT); when(configuration @@ -343,15 +331,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { .getInt(OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE, OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT); - when(configuration - .getInt(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE, - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT)) - .thenReturn(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT); - when(configuration - .getStorageSize(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE, - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT, - StorageUnit.BYTES)) - .thenReturn(FileUtils.ONE_KB_BI.doubleValue()); when(configuration.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT); @@ -382,42 +361,46 @@ public void init() throws RocksDBException, IOException, ExecutionException { when(ozoneManager.getConfiguration()).thenReturn(configuration); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - CacheLoader loader = - new CacheLoader() { - @Nonnull - @Override - public OmSnapshot load(@Nonnull String key) { - return getMockedOmSnapshot(key); - } - }; - omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.isSnapshotStatus( - any(), any())).thenReturn(true); - snapshotCache = new SnapshotCache(omSnapshotManager, loader, 10); - + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + when(omSnapshotManager.isSnapshotStatus(any(), any())).thenReturn(true); + SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10, omMetrics, 0); + + when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString())) + .thenAnswer(invocationOnMock -> { + SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, invocationOnMock.getArgument(0), + invocationOnMock.getArgument(1), invocationOnMock.getArgument(2)); + return snapshotCache.get(snapInfo.getSnapshotId()); + }); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, - snapshotCache, snapDiffJobTable, snapDiffReportTable, - columnFamilyOptions, codecRegistry); + snapDiffJobTable, snapDiffReportTable, columnFamilyOptions, codecRegistry); + when(omSnapshotManager.getDiffCleanupServiceInterval()).thenReturn(0L); + } + + private CacheLoader mockCacheLoader() { + return new CacheLoader() { + @Nonnull + @Override + public OmSnapshot load(@Nonnull UUID key) { + return getMockedOmSnapshot(key); + } + }; } @AfterEach public void tearDown() { - if (columnFamilyHandles != null) { - 
columnFamilyHandles.forEach(IOUtils::closeQuietly); - } - - IOUtils.closeQuietly(db); - IOUtils.closeQuietly(dbOptions); - IOUtils.closeQuietly(columnFamilyOptions); IOUtils.closeQuietly(snapshotDiffManager); + IOUtils.closeQuietly(columnFamilyHandles); + IOUtils.closeQuietly(db, dbOptions, columnFamilyOptions); } - private OmSnapshot getMockedOmSnapshot(String snapshot) { + private OmSnapshot getMockedOmSnapshot(UUID snapshotId) { OmSnapshot omSnapshot = mock(OmSnapshot.class); - when(omSnapshot.getName()).thenReturn(snapshot); + when(omSnapshot.getName()).thenReturn(snapshotId.toString()); when(omSnapshot.getMetadataManager()).thenReturn(omMetadataManager); when(omMetadataManager.getStore()).thenReturn(dbStore); + when(omSnapshot.getSnapshotID()).thenReturn(snapshotId); return omSnapshot; } @@ -432,6 +415,10 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap2)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); String diffDir = snapDiffDir.getAbsolutePath(); Set randomStrings = IntStream.range(0, numberOfFiles) @@ -444,12 +431,12 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { eq(diffDir)) ).thenReturn(Lists.newArrayList(randomStrings)); - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted rcToSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap2); @@ -501,6 +488,10 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, }); UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap2)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); if (!useFullDiff) { when(differ.getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), @@ -509,12 +500,12 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, .thenReturn(Collections.emptyList()); } - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted 
rcToSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); @@ -564,6 +555,10 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) }); UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap1)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); doThrow(new FileNotFoundException("File not found exception.")) .when(differ) @@ -572,12 +567,12 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) any(DifferSnapshotInfo.class), anyString()); - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted rcToSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); @@ -653,15 +648,11 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, try (MockedConstruction mockedSSTFileReader = mockConstruction(SstFileSetReader.class, (mock, context) -> { - when(mock.getKeyStreamWithTombstone(any(), any(), any())) + when(mock.getKeyStreamWithTombstone(any(), any())) .thenReturn(keysIncludingTombstones.stream()); when(mock.getKeyStream(any(), any())) .thenReturn(keysExcludingTombstones.stream()); }); - MockedConstruction mockedSSTDumpTool = - mockConstruction(ManagedSSTDumpTool.class, - (mock, context) -> { - }) ) { Map toSnapshotTableMap = IntStream.concat(IntStream.range(0, 25), IntStream.range(50, 100)) @@ -679,9 +670,6 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, Table fromSnapshotTable = getMockedTable(fromSnapshotTableMap, snapshotTableName); - snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, - snapshotCache, snapDiffJobTable, snapDiffReportTable, - columnFamilyOptions, codecRegistry); SnapshotDiffManager spy = spy(snapshotDiffManager); doAnswer(invocation -> { @@ -828,7 +816,7 @@ public void testGenerateDiffReport() throws IOException { when(keyInfo.getKeyName()).thenReturn(i.getArgument(0)); when(keyInfo.isKeyInfoSame(any(OmKeyInfo.class), eq(false), eq(false), - eq(false), eq(false))) + eq(false), eq(false), eq(true))) .thenAnswer(k -> { int keyVal = Integer.parseInt(((String)i.getArgument(0)) .substring(3)); @@ -1566,7 +1554,6 @@ public void testGetSnapshotDiffReportHappyCase() throws Exception { * Tests that only QUEUED jobs are submitted to the executor and rest are * short-circuited based on previous one. 
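(For the snapshot-diff tests above, getActiveSnapshot(volume, bucket, snapshotName) is stubbed to resolve a snapshot ID and return a reference-counted handle from the UUID-keyed SnapshotCache. A hedged Mockito sketch of that stubbing, assuming the Mockito static imports, SnapshotCache and OmSnapshotManager types already used in this test; the in-memory map stands in for the mocked snapshotInfoTable lookup, and the helper and map names are illustrative only.)

    private OmSnapshotManager mockSnapshotManager(SnapshotCache snapshotCache) throws Exception {
      Map<String, UUID> nameToId = new ConcurrentHashMap<>(); // stand-in for snapshotInfoTable
      OmSnapshotManager manager = mock(OmSnapshotManager.class);
      when(manager.getActiveSnapshot(anyString(), anyString(), anyString()))
          .thenAnswer(invocation -> {
            String key = invocation.getArgument(0) + "/" + invocation.getArgument(1)
                + "/" + invocation.getArgument(2);
            UUID snapshotId = nameToId.computeIfAbsent(key, k -> UUID.randomUUID());
            // Returns the reference-counted OmSnapshot loaded (or reused) by the cache.
            return snapshotCache.get(snapshotId);
          });
      return manager;
    }
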
*/ - @Unhealthy @Test public void testGetSnapshotDiffReportJob() throws Exception { for (int i = 0; i < jobStatuses.size(); i++) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java similarity index 96% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java index 48f366371adf..dc00433e179b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java @@ -16,10 +16,12 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; import org.apache.hadoop.util.Time; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java similarity index 77% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java index a8b026af05b5..31ca16481f4c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java @@ -16,7 +16,7 @@ * limitations under the License. 
* */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -26,6 +26,14 @@ import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OmTestManagers; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SstFilteringService; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -34,18 +42,18 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.util.ExitUtils; -import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.rocksdb.LiveFileMetaData; import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -71,24 +79,20 @@ /** * Test SST Filtering Service. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class TestSstFilteringService { - public static final String SST_FILE_EXTENSION = ".sst"; - @TempDir - private File folder; + private static final String SST_FILE_EXTENSION = ".sst"; private OzoneManagerProtocol writeClient; private OzoneManager om; private OzoneConfiguration conf; private KeyManager keyManager; + private short countTotalSnapshots = 0; @BeforeAll - public static void setup() { + void setup(@TempDir Path folder) throws Exception { ExitUtils.disableSystemExit(); - } - - @BeforeEach - void init() throws Exception { conf = new OzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, folder.getAbsolutePath()); + conf.set(OZONE_METADATA_DIRS, folder.toString()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, 100, @@ -102,7 +106,7 @@ void init() throws Exception { om = omTestManagers.getOzoneManager(); } - @AfterEach + @AfterAll public void cleanup() throws Exception { if (keyManager != null) { keyManager.stop(); @@ -140,9 +144,23 @@ public void testIrrelevantSstFileDeletion() keyManager.getSnapshotSstFilteringService(); final int keyCount = 100; - String volumeName = "vol1"; + String volumeName = "volz"; String bucketName1 = "buck1"; - createVolumeAndBucket(volumeName, bucketName1); + createVolume(volumeName); + addBucketToVolume(volumeName, bucketName1); + + long countExistingSnapshots = filteringService.getSnapshotFilteredCount().get(); + List previousFiles = activeDbStore.getDb().getSstFileList(); + List listPreviousFiles = new ArrayList(); + int level0FilesCountDiff = 0; + int totalFileCountDiff = 0; + for (LiveFileMetaData fileMetaData : previousFiles) { + totalFileCountDiff++; + listPreviousFiles.add(fileMetaData.fileName()); + if (fileMetaData.level() == 0) { + level0FilesCountDiff++; + } + } createKeys(volumeName, bucketName1, keyCount / 2); activeDbStore.getDb().flush(OmMetadataManagerImpl.KEY_TABLE); @@ -153,8 +171,7 @@ public void testIrrelevantSstFileDeletion() int level0FilesCount = 0; int totalFileCount = 0; - List initialsstFileList = - activeDbStore.getDb().getSstFileList(); + List initialsstFileList = activeDbStore.getDb().getSstFileList(); for (LiveFileMetaData fileMetaData : initialsstFileList) { totalFileCount++; if (fileMetaData.level() == 0) { @@ -162,36 +179,36 @@ public void testIrrelevantSstFileDeletion() } } - assertEquals(totalFileCount, level0FilesCount); + assertEquals(totalFileCount - totalFileCountDiff, level0FilesCount - level0FilesCountDiff); activeDbStore.getDb().compactRange(OmMetadataManagerImpl.KEY_TABLE); int nonLevel0FilesCountAfterCompact = 0; - List nonLevelOFiles = new ArrayList<>(); + List nonLevelOFiles = new ArrayList<>(); for (LiveFileMetaData fileMetaData : activeDbStore.getDb() .getSstFileList()) { if (fileMetaData.level() != 0) { nonLevel0FilesCountAfterCompact++; - nonLevelOFiles.add(fileMetaData); + nonLevelOFiles.add(fileMetaData.fileName()); } } assertThat(nonLevel0FilesCountAfterCompact).isGreaterThan(0); String bucketName2 = "buck2"; - createVolumeAndBucket(volumeName, bucketName2); + addBucketToVolume(volumeName, bucketName2); createKeys(volumeName, bucketName2, keyCount); activeDbStore.getDb().flush(OmMetadataManagerImpl.KEY_TABLE); List allFiles = activeDbStore.getDb().getSstFileList(); String snapshotName1 = "snapshot1"; - writeClient.createSnapshot(volumeName, bucketName2, snapshotName1); + createSnapshot(volumeName, bucketName2, snapshotName1); SnapshotInfo 
snapshotInfo = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketName2, snapshotName1)); assertFalse(snapshotInfo.isSstFiltered()); - waitForSnapshotsAtLeast(filteringService, 1); - assertEquals(1, filteringService.getSnapshotFilteredCount().get()); + waitForSnapshotsAtLeast(filteringService, countExistingSnapshots + 1); + assertEquals(countExistingSnapshots + 1, filteringService.getSnapshotFilteredCount().get()); Set keysFromActiveDb = getKeysFromDb(om.getMetadataManager(), volumeName, bucketName2); @@ -206,27 +223,33 @@ public void testIrrelevantSstFileDeletion() OmSnapshotManager.getSnapshotPath(conf, snapshotInfo); for (LiveFileMetaData file : allFiles) { + //Skipping the previous files from this check even those also works. + if (listPreviousFiles.contains(file.fileName())) { + continue; + } File sstFile = new File(snapshotDirName + OM_KEY_PREFIX + file.fileName()); - if (nonLevelOFiles.stream() - .anyMatch(o -> file.fileName().equals(o.fileName()))) { + if (nonLevelOFiles.contains(file.fileName())) { assertFalse(sstFile.exists()); } else { assertTrue(sstFile.exists()); } } - assertTrue(snapshotInfo.isSstFiltered()); + // Need to read the sstFiltered flag which is set in background process and + // hence snapshotInfo.isSstFiltered() may not work sometimes. + assertTrue(om.getMetadataManager().getSnapshotInfoTable().get(SnapshotInfo + .getTableKey(volumeName, bucketName2, snapshotName1)).isSstFiltered()); String snapshotName2 = "snapshot2"; final long count; try (BootstrapStateHandler.Lock lock = filteringService.getBootstrapStateLock().lock()) { count = filteringService.getSnapshotFilteredCount().get(); - writeClient.createSnapshot(volumeName, bucketName2, snapshotName2); + createSnapshot(volumeName, bucketName2, snapshotName2); assertThrows(TimeoutException.class, - () -> waitForSnapshotsAtLeast(filteringService, count + 1)); + () -> waitForSnapshotsAtLeast(filteringService, count + 1 + countExistingSnapshots)); assertEquals(count, filteringService.getSnapshotFilteredCount().get()); } @@ -245,9 +268,10 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { String volumeName = "volume1"; List bucketNames = Arrays.asList("bucket1", "bucket2"); + createVolume(volumeName); // Create 2 Buckets for (String bucketName : bucketNames) { - createVolumeAndBucket(volumeName, bucketName); + addBucketToVolume(volumeName, bucketName); } // Write 25 keys in each bucket, 2 sst files would be generated each for // keys in a single bucket @@ -265,8 +289,8 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { keyManager.getSnapshotSstFilteringService(); sstFilteringService.pause(); - writeClient.createSnapshot(volumeName, bucketNames.get(0), "snap1"); - writeClient.createSnapshot(volumeName, bucketNames.get(0), "snap2"); + createSnapshot(volumeName, bucketNames.get(0), "snap1"); + createSnapshot(volumeName, bucketNames.get(0), "snap2"); SnapshotInfo snapshot1Info = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketNames.get(0), "snap1")); @@ -284,15 +308,15 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { await(10_000, 1_000, () -> snap1Current.exists() && snap2Current.exists()); long snap1SstFileCountBeforeFilter = Arrays.stream(snapshot1Dir.listFiles()) - .filter(f -> f.getName().endsWith(".sst")).count(); + .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); long snap2SstFileCountBeforeFilter = Arrays.stream(snapshot2Dir.listFiles()) - 
.filter(f -> f.getName().endsWith(".sst")).count(); + .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); // delete snap1 writeClient.deleteSnapshot(volumeName, bucketNames.get(0), "snap1"); sstFilteringService.resume(); // Filtering service will only act on snap2 as it is an active snaphot - waitForSnapshotsAtLeast(sstFilteringService, 2); + waitForSnapshotsAtLeast(sstFilteringService, countTotalSnapshots); long snap1SstFileCountAfterFilter = Arrays.stream(snapshot1Dir.listFiles()) .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); long snap2SstFileCountAfterFilter = Arrays.stream(snapshot2Dir.listFiles()) @@ -300,10 +324,12 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { // one sst will be filtered in both active but not in deleted snapshot // as sstFiltering svc won't run on already deleted snapshots but will mark // it as filtered. - assertEquals(2, sstFilteringService.getSnapshotFilteredCount().get()); + assertEquals(countTotalSnapshots, sstFilteringService.getSnapshotFilteredCount().get()); assertEquals(snap1SstFileCountBeforeFilter, snap1SstFileCountAfterFilter); - assertEquals(snap2SstFileCountBeforeFilter - 1, - snap2SstFileCountAfterFilter); + // If method with order 1 is run .sst file from /vol1/buck1 and /vol1/buck2 will be deleted. + // As part of this method .sst file from /volume1/bucket2/ will be deleted. + // sstFiltering won't run on deleted snapshots in /volume1/bucket1. + assertThat(snap2SstFileCountBeforeFilter).isGreaterThan(snap2SstFileCountAfterFilter); } private void createKeys(String volumeName, @@ -316,8 +342,7 @@ private void createKeys(String volumeName, } } - private void createVolumeAndBucket(String volumeName, - String bucketName) + private void createVolume(String volumeName) throws IOException { OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), OmVolumeArgs.newBuilder() @@ -325,7 +350,10 @@ private void createVolumeAndBucket(String volumeName, .setAdminName("a") .setVolume(volumeName) .build()); + } + private void addBucketToVolume(String volumeName, String bucketName) + throws IOException { OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), OmBucketInfo.newBuilder().setVolumeName(volumeName) .setBucketName(bucketName) @@ -348,6 +376,8 @@ private void createKey(OzoneManagerProtocol managerProtocol, .setReplicationConfig(StandaloneReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(new ArrayList<>()) + .setOwnerName( + UserGroupInformation.getCurrentUser().getShortUserName()) .build(); //Open and Commit the Key in the Key Manager. 
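(The before/after assertions above compare counts of RocksDB .sst files directly under each snapshot checkpoint directory. A small self-contained sketch of that count over a local directory; the class name is illustrative.)

    import java.io.File;
    import java.util.Arrays;

    final class SstFileCount {
      private static final String SST_FILE_EXTENSION = ".sst";

      private SstFileCount() { }

      // Counts SST files directly under the given snapshot checkpoint directory.
      static long countSstFiles(File snapshotDir) {
        File[] files = snapshotDir.listFiles();
        if (files == null) {       // directory missing or not readable
          return 0L;
        }
        return Arrays.stream(files)
            .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION))
            .count();
      }
    }
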
OpenKeySession session = managerProtocol.openKey(keyArg); @@ -371,8 +401,9 @@ public void testSstFilteringService() throws Exception { String volumeName = "volume"; List bucketNames = Arrays.asList("bucket", "bucket1", "bucket2"); + createVolume(volumeName); for (String bucketName : bucketNames) { - createVolumeAndBucket(volumeName, bucketName); + addBucketToVolume(volumeName, bucketName); } int keyCount = 150; @@ -407,15 +438,14 @@ public void testSstFilteringService() throws Exception { List snapshotNames = Arrays.asList("snap", "snap-1", "snap-2"); for (int i = 0; i < 3; i++) { - writeClient.createSnapshot(volumeName, bucketNames.get(i), - snapshotNames.get(i)); + createSnapshot(volumeName, bucketNames.get(i), snapshotNames.get(i)); } SstFilteringService sstFilteringService = keyManager.getSnapshotSstFilteringService(); - waitForSnapshotsAtLeast(sstFilteringService, 3); - assertEquals(3, sstFilteringService.getSnapshotFilteredCount().get()); + waitForSnapshotsAtLeast(sstFilteringService, countTotalSnapshots); + assertEquals(countTotalSnapshots, sstFilteringService.getSnapshotFilteredCount().get()); Set keyInBucketAfterFilteringRun = getKeysFromSnapshot(volumeName, bucketNames.get(0), @@ -461,12 +491,18 @@ private Set getKeysFromSnapshot(String volume, String snapshot) throws IOException { SnapshotInfo snapshotInfo = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volume, bucket, snapshot)); - try (ReferenceCounted - snapshotMetadataReader = om.getOmSnapshotManager() - .getSnapshotCache() - .get(snapshotInfo.getTableKey())) { - OmSnapshot omSnapshot = (OmSnapshot) snapshotMetadataReader.get(); + try (ReferenceCounted snapshotMetadataReader = + om.getOmSnapshotManager().getActiveSnapshot( + snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), + snapshotInfo.getName())) { + OmSnapshot omSnapshot = snapshotMetadataReader.get(); return getKeysFromDb(omSnapshot.getMetadataManager(), volume, bucket); } } + + private void createSnapshot(String volumeName, String bucketName, String snapshotName) throws IOException { + writeClient.createSnapshot(volumeName, bucketName, snapshotName); + countTotalSnapshots++; + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java index ce7c0c848f11..d9e9952b92b8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java @@ -321,7 +321,9 @@ public void testRenewTokenFailureRenewalTime() throws Exception { IOException ioException = assertThrows(IOException.class, () -> secretManager.renewToken(token, TEST_USER.toString())); - assertThat(ioException.getMessage()).contains("is expired"); + String errorMessage = ioException.getMessage(); + assertTrue(errorMessage.contains("is expired") || errorMessage.contains("can't be found in cache"), + "\nExpecting:\n" + errorMessage + "\n to contain \"is expired\" or \"can't be found in cache\""); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java new file mode 100644 index 000000000000..23f21e9cdaed --- /dev/null +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.security.acl; + +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; + +import java.io.IOException; +import java.util.List; + +/** Helper for ACL tests. */ +final class OzoneNativeAclTestUtil { + + public static void addVolumeAcl( + OMMetadataManager metadataManager, + String volume, + OzoneAcl ozoneAcl + ) throws IOException { + final String volumeKey = metadataManager.getVolumeKey(volume); + final Table volumeTable = metadataManager.getVolumeTable(); + final OmVolumeArgs omVolumeArgs = volumeTable.get(volumeKey); + + omVolumeArgs.addAcl(ozoneAcl); + + volumeTable.addCacheEntry( + new CacheKey<>(volumeKey), + CacheValue.get(1L, omVolumeArgs)); + } + + public static void addBucketAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + OzoneAcl ozoneAcl) throws IOException { + final String bucketKey = metadataManager.getBucketKey(volume, bucket); + final Table bucketTable = metadataManager.getBucketTable(); + final OmBucketInfo omBucketInfo = bucketTable.get(bucketKey); + + omBucketInfo.addAcl(ozoneAcl); + + bucketTable.addCacheEntry( + new CacheKey<>(bucketKey), + CacheValue.get(1L, omBucketInfo)); + } + + public static void addKeyAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key, + OzoneAcl ozoneAcl + ) throws IOException { + final String objKey = metadataManager.getOzoneKey(volume, bucket, key); + final Table keyTable = metadataManager.getKeyTable(bucketLayout); + final OmKeyInfo omKeyInfo = keyTable.get(objKey); + + omKeyInfo.addAcl(ozoneAcl); + + keyTable.addCacheEntry( + new CacheKey<>(objKey), + CacheValue.get(1L, omKeyInfo)); + } + + public static void setVolumeAcl( + OMMetadataManager metadataManager, + String volume, + List ozoneAcls) throws IOException { + final String volumeKey = metadataManager.getVolumeKey(volume); + final Table volumeTable = metadataManager.getVolumeTable(); + final OmVolumeArgs omVolumeArgs = volumeTable.get(volumeKey); + + omVolumeArgs.setAcls(ozoneAcls); + + volumeTable.addCacheEntry( + new CacheKey<>(volumeKey), + CacheValue.get(1L, omVolumeArgs)); + } + + public static void setBucketAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + List ozoneAcls) throws IOException { + final String bucketKey = metadataManager.getBucketKey(volume, bucket); + final Table bucketTable = metadataManager.getBucketTable(); + final OmBucketInfo omBucketInfo = bucketTable.get(bucketKey); + + omBucketInfo.setAcls(ozoneAcls); + + bucketTable.addCacheEntry( + new CacheKey<>(bucketKey), + CacheValue.get(1L, omBucketInfo)); + } + + public static void setKeyAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key, + List 
ozoneAcls) throws IOException { + final String objKey = metadataManager.getOzoneKey(volume, bucket, key); + final Table keyTable = metadataManager.getKeyTable(bucketLayout); + final OmKeyInfo omKeyInfo = keyTable.get(objKey); + + omKeyInfo.setAcls(ozoneAcls); + + keyTable.addCacheEntry( + new CacheKey<>(objKey), + CacheValue.get(1L, omKeyInfo)); + } + + public static List getVolumeAcls( + OMMetadataManager metadataManager, + String volume + ) throws IOException { + return metadataManager.getVolumeTable() + .get(metadataManager.getVolumeKey(volume)) + .getAcls(); + } + + public static List getBucketAcls( + OMMetadataManager metadataManager, + String volume, + String bucket + ) throws IOException { + return metadataManager.getBucketTable() + .get(metadataManager.getBucketKey(volume, bucket)) + .getAcls(); + } + + public static List getKeyAcls( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key + ) throws IOException { + return metadataManager.getKeyTable(bucketLayout) + .get(metadataManager.getOzoneKey(volume, bucket, key)) + .getAcls(); + } + + private OzoneNativeAclTestUtil() { + // utilities + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index f5bb8d35350b..c354864a5297 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -21,8 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.OzoneAdmins; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.BucketManager; import org.apache.hadoop.ozone.om.KeyManager; @@ -173,6 +171,7 @@ private void createKey(String volume, .setDataSize(0) .setAcls(OzoneAclUtil.getAclList(testUgi.getUserName(), testUgi.getGroupNames(), ALL, ALL)) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); if (keyName.split(OZONE_URI_DELIMITER).length > 1) { @@ -244,9 +243,9 @@ public void testCheckAccessForBucket( ACLType groupRight, boolean expectedResult) throws Exception { createAll(keyName, prefixName, userRight, groupRight, expectedResult); OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(), - parentDirUserAcl, ACCESS); + ACCESS, parentDirUserAcl); OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ? - testUgi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); + testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl); // Set access for volume. // We should directly add to table because old API's update to DB. @@ -266,9 +265,9 @@ public void testCheckAccessForKey( ACLType groupRight, boolean expectedResult) throws Exception { createAll(keyName, prefixName, userRight, groupRight, expectedResult); OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(), - parentDirUserAcl, ACCESS); + ACCESS, parentDirUserAcl); OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ? 
- testUgi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); + testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl); // Set access for volume & bucket. We should directly add to table // because old API's update to DB. @@ -296,9 +295,9 @@ public void testCheckAccessForPrefix( .build(); OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(), - parentDirUserAcl, ACCESS); + ACCESS, parentDirUserAcl); OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ? - testUgi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); + testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl); // Set access for volume & bucket. We should directly add to table // because old API's update to DB. @@ -314,45 +313,19 @@ public void testCheckAccessForPrefix( private void setVolumeAcl(List ozoneAcls) throws IOException { - String volumeKey = metadataManager.getVolumeKey(volObj.getVolumeName()); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.setAcls(ozoneAcls); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.setVolumeAcl(metadataManager, vol, ozoneAcls); } private void setBucketAcl(List ozoneAcls) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.setAcls(ozoneAcls); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.setBucketAcl(metadataManager, vol, buck, ozoneAcls); } private void addVolumeAcl(OzoneAcl ozoneAcl) throws IOException { - String volumeKey = metadataManager.getVolumeKey(volObj.getVolumeName()); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.addAcl(ozoneAcl); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.addVolumeAcl(metadataManager, vol, ozoneAcl); } private void addBucketAcl(OzoneAcl ozoneAcl) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.addAcl(ozoneAcl); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.addBucketAcl(metadataManager, vol, buck, ozoneAcl); } private void resetAclsAndValidateAccess( @@ -379,8 +352,8 @@ private void resetAclsAndValidateAccess( * if user/group has access to them. */ for (ACLType a1 : allAcls) { - OzoneAcl newAcl = new OzoneAcl(accessType, getAclName(accessType), a1, - ACCESS); + OzoneAcl newAcl = new OzoneAcl(accessType, getAclName(accessType), ACCESS, a1 + ); // Reset acls to only one right. if (obj.getResourceType() == VOLUME) { @@ -459,7 +432,7 @@ private void resetAclsAndValidateAccess( ACLIdentityType identityType = ACLIdentityType.values()[type]; // Add remaining acls one by one and then check access. OzoneAcl addAcl = new OzoneAcl(identityType, - getAclName(identityType), a2, ACCESS); + getAclName(identityType), ACCESS, a2); // For volume and bucket update to cache. As Old API's update to // only DB not cache. 
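The recurring change in the ACL test class above and the one that follows is a reordering of the OzoneAcl constructor arguments: the ACL scope (ACCESS) now comes before the ACL right instead of after it. A minimal sketch of the new call shape, inferred from these hunks; the import paths and the varargs form of the last parameter are assumptions, and READ merely stands in for whichever right a test grants:

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;

final class OzoneAclOrderSketch {
  private OzoneAclOrderSketch() { }

  static OzoneAcl readAclFor(String userName) {
    // Old (removed) order placed the right before the scope:
    //   new OzoneAcl(USER, userName, ACLType.READ, ACCESS)
    // New (added) order places the scope before the right(s):
    return new OzoneAcl(USER, userName, ACCESS, ACLType.READ);
  }
}

The same reordering appears in every OzoneAcl construction touched by this patch, including the test helper calls now routed through OzoneNativeAclTestUtil.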
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index f17d477bd793..c3ec7843a6f6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.OzoneAdmins; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.BucketManager; import org.apache.hadoop.ozone.om.KeyManager; @@ -34,7 +32,6 @@ import org.apache.hadoop.ozone.om.VolumeManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; @@ -226,10 +223,10 @@ private void testParentChild(OzoneObj child, .setAclRights(childAclType).build(); OzoneAcl childAcl = new OzoneAcl(USER, - testUgi1.getUserName(), childAclType, ACCESS); + testUgi1.getUserName(), ACCESS, childAclType); OzoneAcl parentAcl = new OzoneAcl(USER, - testUgi1.getUserName(), parentAclType, ACCESS); + testUgi1.getUserName(), ACCESS, parentAclType); assertFalse(nativeAuthorizer.checkAccess(child, requestContext)); if (child.getResourceType() == BUCKET) { @@ -257,7 +254,7 @@ private void testParentChild(OzoneObj child, // add the volume acl (grand-parent), now key access is allowed. 
OzoneAcl parentVolumeAcl = new OzoneAcl(USER, - testUgi1.getUserName(), READ, ACCESS); + testUgi1.getUserName(), ACCESS, READ); addVolumeAcl(child.getVolumeName(), parentVolumeAcl); assertTrue(nativeAuthorizer.checkAccess( child, requestContext)); @@ -265,88 +262,46 @@ private void testParentChild(OzoneObj child, } private void addVolumeAcl(String vol, OzoneAcl ozoneAcl) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.addAcl(ozoneAcl); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.addVolumeAcl(metadataManager, vol, ozoneAcl); } private List getVolumeAcls(String vol) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - return omVolumeArgs.getAcls(); + return OzoneNativeAclTestUtil.getVolumeAcls(metadataManager, vol); } private void setVolumeAcl(String vol, List ozoneAcls) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.setAcls(ozoneAcls); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.setVolumeAcl(metadataManager, vol, ozoneAcls); } private void addKeyAcl(String vol, String buck, String key, OzoneAcl ozoneAcl) throws IOException { - String objKey = metadataManager.getOzoneKey(vol, buck, key); - OmKeyInfo omKeyInfo = - metadataManager.getKeyTable(getBucketLayout()).get(objKey); - - omKeyInfo.addAcl(ozoneAcl); - - metadataManager.getKeyTable(getBucketLayout()) - .addCacheEntry(new CacheKey<>(objKey), - CacheValue.get(1L, omKeyInfo)); + OzoneNativeAclTestUtil.addKeyAcl(metadataManager, vol, buck, getBucketLayout(), key, ozoneAcl); } private void setKeyAcl(String vol, String buck, String key, List ozoneAcls) throws IOException { - String objKey = metadataManager.getOzoneKey(vol, buck, key); - OmKeyInfo omKeyInfo = - metadataManager.getKeyTable(getBucketLayout()).get(objKey); - omKeyInfo.setAcls(ozoneAcls); - - metadataManager.getKeyTable(getBucketLayout()) - .addCacheEntry(new CacheKey<>(objKey), - CacheValue.get(1L, omKeyInfo)); + OzoneNativeAclTestUtil.setKeyAcl(metadataManager, vol, buck, getBucketLayout(), key, ozoneAcls); } private void addBucketAcl(String vol, String buck, OzoneAcl ozoneAcl) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.addAcl(ozoneAcl); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.addBucketAcl(metadataManager, vol, buck, ozoneAcl); } private List getBucketAcls(String vol, String buck) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); + return OzoneNativeAclTestUtil.getBucketAcls(metadataManager, vol, buck); + } - return omBucketInfo.getAcls(); + private List getKeyAcls(String vol, String buck, String key) + throws IOException { + return OzoneNativeAclTestUtil.getKeyAcls(metadataManager, vol, buck, getBucketLayout(), key); } private void setBucketAcl(String vol, String buck, List ozoneAcls) throws 
IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.setAcls(ozoneAcls); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.setBucketAcl(metadataManager, vol, buck, ozoneAcls); } private static OzoneObjInfo createVolume(String volumeName) @@ -391,6 +346,7 @@ private OzoneObjInfo createKey(String volume, String bucket, String keyName) // here we give test ugi full access .setAcls(OzoneAclUtil.getAclList(testUgi.getUserName(), testUgi.getGroupNames(), ALL, ALL)) + .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java index 983086d251b0..7c1aad0723be 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java @@ -138,6 +138,8 @@ private static void prepareTestKeys() throws IOException { .setReplicationConfig( StandaloneReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) + .setOwnerName( + UserGroupInformation.getCurrentUser().getShortUserName()) .setDataSize(0); if (k == 0) { keyArgsBuilder.setAcls(OzoneAclUtil.getAclList( diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java new file mode 100644 index 000000000000..f5d0c8521334 --- /dev/null +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * FIXME: Hack: This is copied from Hadoop 3.3.6. Remove this interface once + * we drop Hadoop 3.1, 3.2 support. + * Implementers of this interface provide a positioned read API that writes to a + * {@link ByteBuffer} rather than a {@code byte[]}. 
+ * + * @see PositionedReadable + * @see ByteBufferReadable + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface ByteBufferPositionedReadable { + /** + * Reads up to {@code buf.remaining()} bytes into buf from a given position + * in the file and returns the number of bytes read. Callers should use + * {@code buf.limit(...)} to control the size of the desired read and + * {@code buf.position(...)} to control the offset into the buffer the data + * should be written to. + *

+ * After a successful call, {@code buf.position()} will be advanced by the + * number of bytes read and {@code buf.limit()} will be unchanged. + *
+ * In the case of an exception, the state of the buffer (the contents of the + * buffer, the {@code buf.position()}, the {@code buf.limit()}, etc.) is + * undefined, and callers should be prepared to recover from this + * eventuality. + *
+ * Callers should use {@link StreamCapabilities#hasCapability(String)} with + * {@link StreamCapabilities#PREADBYTEBUFFER} to check if the underlying + * stream supports this interface, otherwise they might get a + * {@link UnsupportedOperationException}. + *
+ * Implementations should treat 0-length requests as legitimate, and must not + * signal an error upon their receipt. + *
    + * This does not change the current offset of a file, and is thread-safe. + * + * @param position position within file + * @param buf the ByteBuffer to receive the results of the read operation. + * @return the number of bytes read, possibly zero, or -1 if reached + * end-of-stream + * @throws IOException if there is some error performing the read + */ + int read(long position, ByteBuffer buf) throws IOException; + + /** + * Reads {@code buf.remaining()} bytes into buf from a given position in + * the file or until the end of the data was reached before the read + * operation completed. Callers should use {@code buf.limit(...)} to + * control the size of the desired read and {@code buf.position(...)} to + * control the offset into the buffer the data should be written to. + *

    + * This operation provides similar semantics to + * {@link #read(long, ByteBuffer)}, the difference is that this method is + * guaranteed to read data until the {@link ByteBuffer} is full, or until + * the end of the data stream is reached. + * + * @param position position within file + * @param buf the ByteBuffer to receive the results of the read operation. + * @throws IOException if there is some error performing the read + * @throws EOFException the end of the data was reached before + * the read operation completed + * @see #read(long, ByteBuffer) + */ + void readFully(long position, ByteBuffer buf) throws IOException; +} diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 28812a5a1a9d..e4df4c242bf4 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -67,6 +66,7 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -82,6 +82,10 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import org.slf4j.Logger; @@ -201,8 +205,8 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); this.config = conf; } @@ -525,7 +529,7 @@ private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status, keyInfo.getModificationTime(), keyInfo.getModificationTime(), status.isDirectory() ? 
(short) 00777 : (short) 00666, - owner, + StringUtils.defaultIfEmpty(keyInfo.getOwnerName(), owner), owner, null, getBlockLocations(status), @@ -609,18 +613,23 @@ public FileChecksum getFileChecksum(String keyName, long length) @Override public String createSnapshot(String pathStr, String snapshotName) throws IOException { - OFSPath ofsPath = new OFSPath(pathStr, config); - return objectStore.createSnapshot(ofsPath.getVolumeName(), - ofsPath.getBucketName(), - snapshotName); + return objectStore.createSnapshot(volume.getName(), bucket.getName(), snapshotName); + } + + @Override + public void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) + throws IOException { + objectStore.renameSnapshot(volume.getName(), + bucket.getName(), + snapshotOldName, + snapshotNewName); } @Override public void deleteSnapshot(String pathStr, String snapshotName) throws IOException { - OFSPath ofsPath = new OFSPath(pathStr, config); - objectStore.deleteSnapshot(ofsPath.getVolumeName(), - ofsPath.getBucketName(), + objectStore.deleteSnapshot(volume.getName(), + bucket.getName(), snapshotName); } @@ -663,12 +672,11 @@ public SnapshotDiffReport getSnapshotDiffReport(Path snapshotDir, } finally { // delete the temp snapshot if (takeTemporaryToSnapshot || takeTemporaryFromSnapshot) { - OFSPath snapPath = new OFSPath(snapshotDir.toString(), config); if (takeTemporaryToSnapshot) { - OzoneClientUtils.deleteSnapshot(objectStore, toSnapshot, snapPath); + OzoneClientUtils.deleteSnapshot(objectStore, toSnapshot, volume.getName(), bucket.getName()); } if (takeTemporaryFromSnapshot) { - OzoneClientUtils.deleteSnapshot(objectStore, fromSnapshot, snapPath); + OzoneClientUtils.deleteSnapshot(objectStore, fromSnapshot, volume.getName(), bucket.getName()); } } } @@ -691,11 +699,21 @@ private SnapshotDiffReportOzone getSnapshotDiffReportOnceComplete( } @Override - public OmKeyInfo recoverFilePrepare(final String pathStr, boolean force) throws IOException { + public LeaseKeyInfo recoverFilePrepare(final String pathStr, boolean force) throws IOException { incrementCounter(Statistic.INVOCATION_RECOVER_FILE_PREPARE, 1); - return ozoneClient.getProxy().getOzoneManagerClient().recoverLease( - volume.getName(), bucket.getName(), pathStr, force); + try { + return ozoneClient.getProxy().getOzoneManagerClient().recoverLease( + volume.getName(), bucket.getName(), pathStr, force); + } catch (OMException ome) { + if (ome.getResult() == NOT_A_FILE) { + throw new FileNotFoundException("Path is not a file. " + ome.getMessage()); + } else if (ome.getResult() == KEY_NOT_FOUND || + ome.getResult() == DIRECTORY_NOT_FOUND) { + throw new FileNotFoundException("File does not exist. 
" + ome.getMessage()); + } + throw ome; + } } @Override @@ -760,15 +778,21 @@ public void setTimes(String key, long mtime, long atime) throws IOException { @Override public boolean isFileClosed(String pathStr) throws IOException { incrementCounter(Statistic.INVOCATION_IS_FILE_CLOSED, 1); - OFSPath ofsPath = new OFSPath(pathStr, config); - if (!ofsPath.isKey()) { + if (StringUtils.isEmpty(pathStr)) { throw new IOException("not a file"); } - OzoneFileStatus status = bucket.getFileStatus(pathStr); - if (!status.isFile()) { - throw new IOException("not a file"); + try { + OzoneFileStatus status = bucket.getFileStatus(pathStr); + if (!status.isFile()) { + throw new FileNotFoundException("Path is not a file."); + } + return !status.getKeyInfo().isHsync(); + } catch (OMException ome) { + if (ome.getResult() == FILE_NOT_FOUND) { + throw new FileNotFoundException("File does not exist. " + ome.getMessage()); + } + throw ome; } - return !status.getKeyInfo().isHsync(); } @Override diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index dbe3b517e554..cd09cf1d5a8f 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -954,6 +954,12 @@ public Path createSnapshot(Path path, String snapshotName) OM_SNAPSHOT_INDICATOR + OZONE_URI_DELIMITER + snapshot); } + @Override + public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) + throws IOException { + getAdapter().renameSnapshot(pathToKey(path), snapshotOldName, snapshotNewName); + } + @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index e1ed85cff171..b80b1c4d664b 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -79,6 +79,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -100,8 +101,14 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .BUCKET_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .VOLUME_ALREADY_EXISTS; 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; /** @@ -209,8 +216,8 @@ public BasicRootedOzoneClientAdapterImpl(String omHost, int omPort, proxy = objectStore.getClientProxy(); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); // Fetches the bucket layout to be used by OFS. initDefaultFsBucketLayout(conf); @@ -893,9 +900,9 @@ public List listStatus(String pathStr, boolean recursive, } OFSPath ofsStartPath = new OFSPath(startPath, config); if (ofsPath.isVolume()) { - String startBucket = ofsStartPath.getBucketName(); + String startBucketPath = ofsStartPath.getNonKeyPath(); return listStatusVolume(ofsPath.getVolumeName(), - recursive, startBucket, numEntries, uri, workingDir, username); + recursive, startBucketPath, numEntries, uri, workingDir, username); } if (ofsPath.isSnapshotPath()) { @@ -1015,7 +1022,7 @@ private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status, keyInfo.getModificationTime(), keyInfo.getModificationTime(), status.isDirectory() ? (short) 00777 : (short) 00666, - owner, + StringUtils.defaultIfEmpty(keyInfo.getOwnerName(), owner), owner, null, getBlockLocations(status), @@ -1265,6 +1272,16 @@ public String createSnapshot(String pathStr, String snapshotName) snapshotName); } + @Override + public void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) + throws IOException { + OFSPath ofsPath = new OFSPath(pathStr, config); + proxy.renameSnapshot(ofsPath.getVolumeName(), + ofsPath.getBucketName(), + snapshotOldName, + snapshotNewName); + } + @Override public void deleteSnapshot(String pathStr, String snapshotName) throws IOException { @@ -1318,10 +1335,10 @@ public SnapshotDiffReport getSnapshotDiffReport(Path snapshotDir, } finally { // delete the temp snapshot if (takeTemporaryToSnapshot) { - OzoneClientUtils.deleteSnapshot(objectStore, toSnapshot, ofsPath); + OzoneClientUtils.deleteSnapshot(objectStore, toSnapshot, volume, bucket); } if (takeTemporaryFromSnapshot) { - OzoneClientUtils.deleteSnapshot(objectStore, fromSnapshot, ofsPath); + OzoneClientUtils.deleteSnapshot(objectStore, fromSnapshot, volume, bucket); } } } @@ -1348,30 +1365,50 @@ public boolean isFileClosed(String pathStr) throws IOException { OFSPath ofsPath = new OFSPath(pathStr, config); String key = ofsPath.getKeyName(); if (ofsPath.isRoot() || ofsPath.isVolume()) { - throw new IOException("not a file"); + throw new FileNotFoundException("Path is not a file."); } else { - OzoneBucket bucket = getBucket(ofsPath, false); - if (ofsPath.isSnapshotPath()) { - throw new IOException("file is in a snapshot."); - } else { - OzoneFileStatus status = bucket.getFileStatus(key); - if (!status.isFile()) { - throw new IOException("not a file"); + try { + OzoneBucket bucket = getBucket(ofsPath, false); + if (ofsPath.isSnapshotPath()) { + throw new IOException("file is in a snapshot."); + } else { + OzoneFileStatus status = bucket.getFileStatus(key); + if (!status.isFile()) { + throw new FileNotFoundException("Path is not a file."); + } + return !status.getKeyInfo().isHsync(); + } + } catch (OMException ome) { + if (ome.getResult() == FILE_NOT_FOUND || + ome.getResult() == VOLUME_NOT_FOUND || + ome.getResult() == BUCKET_NOT_FOUND) { + throw new FileNotFoundException("File does not 
exist. " + ome.getMessage()); } - return !status.getKeyInfo().isHsync(); + throw ome; } } } @Override - public OmKeyInfo recoverFilePrepare(final String pathStr, boolean force) throws IOException { + public LeaseKeyInfo recoverFilePrepare(final String pathStr, boolean force) throws IOException { incrementCounter(Statistic.INVOCATION_RECOVER_FILE_PREPARE, 1); OFSPath ofsPath = new OFSPath(pathStr, config); - OzoneVolume volume = objectStore.getVolume(ofsPath.getVolumeName()); - OzoneBucket bucket = getBucket(ofsPath, false); - return ozoneClient.getProxy().getOzoneManagerClient().recoverLease( - volume.getName(), bucket.getName(), ofsPath.getKeyName(), force); + try { + OzoneBucket bucket = getBucket(ofsPath, false); + return ozoneClient.getProxy().getOzoneManagerClient().recoverLease( + bucket.getVolumeName(), bucket.getName(), ofsPath.getKeyName(), force); + } catch (OMException ome) { + if (ome.getResult() == NOT_A_FILE) { + throw new FileNotFoundException("Path is not a file. " + ome.getMessage()); + } else if (ome.getResult() == KEY_NOT_FOUND || + ome.getResult() == DIRECTORY_NOT_FOUND || + ome.getResult() == VOLUME_NOT_FOUND || + ome.getResult() == BUCKET_NOT_FOUND) { + throw new FileNotFoundException("File does not exist. " + ome.getMessage()); + } + throw ome; + } } @Override diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index b13d726371c4..8ad50058f1bb 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -18,6 +18,8 @@ package org.apache.hadoop.fs.ozone; import com.google.common.base.Preconditions; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CreateFlag; @@ -41,6 +43,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.ozone.OFSPath; @@ -239,7 +242,12 @@ public FSDataInputStream open(Path path, int bufferSize) throws IOException { statistics.incrementReadOps(1); LOG.trace("open() path: {}", path); final String key = pathToKey(path); - return new FSDataInputStream(createFSInputStream(adapter.readFile(key))); + return TracingUtil.executeInNewSpan("ofs open", + () -> { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("path", key); + return new FSDataInputStream(createFSInputStream(adapter.readFile(key))); + }); } protected InputStream createFSInputStream(InputStream inputStream) { @@ -263,7 +271,8 @@ public FSDataOutputStream create(Path f, FsPermission permission, incrementCounter(Statistic.INVOCATION_CREATE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(f); - return createOutputStream(key, replication, overwrite, true); + return TracingUtil.executeInNewSpan("ofs create", + () -> createOutputStream(key, replication, overwrite, true)); } @Override @@ -277,8 +286,10 @@ public FSDataOutputStream createNonRecursive(Path path, 
incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(path); - return createOutputStream(key, - replication, flags.contains(CreateFlag.OVERWRITE), false); + return TracingUtil.executeInNewSpan("ofs createNonRecursive", + () -> + createOutputStream(key, + replication, flags.contains(CreateFlag.OVERWRITE), false)); } private OutputStream selectOutputStream(String key, short replication, @@ -374,6 +385,14 @@ boolean processKeyPath(List keyPathList) throws IOException { */ @Override public boolean rename(Path src, Path dst) throws IOException { + return TracingUtil.executeInNewSpan("ofs rename", + () -> renameInSpan(src, dst)); + } + + private boolean renameInSpan(Path src, Path dst) throws IOException { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("src", src.toString()) + .setTag("dst", dst.toString()); incrementCounter(Statistic.INVOCATION_RENAME, 1); statistics.incrementWriteOps(1); if (src.equals(dst)) { @@ -526,16 +545,23 @@ protected void rename(final Path src, final Path dst, @Override public Path createSnapshot(Path path, String snapshotName) throws IOException { - String snapshot = getAdapter() - .createSnapshot(pathToKey(path), snapshotName); + String snapshot = TracingUtil.executeInNewSpan("ofs createSnapshot", + () -> getAdapter().createSnapshot(pathToKey(path), snapshotName)); return new Path(OzoneFSUtils.trimPathToDepth(path, PATH_DEPTH_TO_BUCKET), OM_SNAPSHOT_INDICATOR + OZONE_URI_DELIMITER + snapshot); } + @Override + public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) + throws IOException { + getAdapter().renameSnapshot(pathToKey(path), snapshotOldName, snapshotNewName); + } + @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { - adapter.deleteSnapshot(pathToKey(path), snapshotName); + TracingUtil.executeInNewSpan("ofs deleteSnapshot", + () -> adapter.deleteSnapshot(pathToKey(path), snapshotName)); } private class DeleteIterator extends OzoneListingIterator { @@ -666,6 +692,11 @@ private boolean innerDelete(Path f, boolean recursive) throws IOException { */ @Override public boolean delete(Path f, boolean recursive) throws IOException { + return TracingUtil.executeInNewSpan("ofs delete", + () -> deleteInSpan(f, recursive)); + } + + private boolean deleteInSpan(Path f, boolean recursive) throws IOException { incrementCounter(Statistic.INVOCATION_DELETE, 1); statistics.incrementWriteOps(1); LOG.debug("Delete path {} - recursive {}", f, recursive); @@ -688,7 +719,7 @@ public boolean delete(Path f, boolean recursive) throws IOException { LOG.warn("Recursive volume delete using ofs is not supported"); throw new IOException("Recursive volume delete using " + "ofs is not supported. 
" + - "Instead use 'ozone sh volume delete -r -skipTrash " + + "Instead use 'ozone sh volume delete -r " + "-id ' command"); } return deleteVolume(f, ofsPath); @@ -883,7 +914,8 @@ private boolean o3Exists(final Path f) throws IOException { @Override public FileStatus[] listStatus(Path f) throws IOException { - return convertFileStatusArr(listStatusAdapter(f)); + return TracingUtil.executeInNewSpan("ofs listStatus", + () -> convertFileStatusArr(listStatusAdapter(f))); } private FileStatus[] convertFileStatusArr( @@ -940,7 +972,8 @@ public Path getWorkingDirectory() { @Override public Token getDelegationToken(String renewer) throws IOException { - return adapter.getDelegationToken(renewer); + return TracingUtil.executeInNewSpan("ofs getDelegationToken", + () -> adapter.getDelegationToken(renewer)); } /** @@ -1008,7 +1041,8 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { if (isEmpty(key)) { return false; } - return mkdir(f); + return TracingUtil.executeInNewSpan("ofs mkdirs", + () -> mkdir(f)); } @Override @@ -1019,7 +1053,8 @@ public long getDefaultBlockSize() { @Override public FileStatus getFileStatus(Path f) throws IOException { - return convertFileStatus(getFileStatusAdapter(f)); + return TracingUtil.executeInNewSpan("ofs getFileStatus", + () -> convertFileStatus(getFileStatusAdapter(f))); } public FileStatusAdapter getFileStatusAdapter(Path f) throws IOException { @@ -1090,7 +1125,8 @@ public boolean exists(Path f) throws IOException { public FileChecksum getFileChecksum(Path f, long length) throws IOException { incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM); String key = pathToKey(f); - return adapter.getFileChecksum(key, length); + return TracingUtil.executeInNewSpan("ofs getFileChecksum", + () -> adapter.getFileChecksum(key, length)); } @Override @@ -1502,6 +1538,11 @@ FileStatus convertFileStatus(FileStatusAdapter fileStatusAdapter) { @Override public ContentSummary getContentSummary(Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs getContentSummary", + () -> getContentSummaryInSpan(f)); + } + + private ContentSummary getContentSummaryInSpan(Path f) throws IOException { FileStatusAdapter status = getFileStatusAdapter(f); if (status.isFile()) { @@ -1577,7 +1618,8 @@ public void setTimes(Path f, long mtime, long atime) throws IOException { if (key.equals("NONE")) { throw new FileNotFoundException("File not found. 
path /NONE."); } - adapter.setTimes(key, mtime, atime); + TracingUtil.executeInNewSpan("ofs setTimes", + () -> adapter.setTimes(key, mtime, atime)); } protected boolean setSafeModeUtil(SafeModeAction action, @@ -1589,6 +1631,7 @@ protected boolean setSafeModeUtil(SafeModeAction action, statistics.incrementWriteOps(1); } LOG.trace("setSafeMode() action:{}", action); - return getAdapter().setSafeMode(action, isChecked); + return TracingUtil.executeInNewSpan("ofs setSafeMode", + () -> getAdapter().setSafeMode(action, isChecked)); } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java index 290546e4a104..30e0c32265bf 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java @@ -35,6 +35,7 @@ public boolean hasCapability(String capability) { switch (StringUtils.toLowerCase(capability)) { case StreamCapabilities.READBYTEBUFFER: case StreamCapabilities.UNBUFFER: + case StreamCapabilities.PREADBYTEBUFFER: return true; default: return false; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java index c7444a389d9b..7e78d6650ee3 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -28,8 +28,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.security.token.Token; @@ -92,13 +92,15 @@ FileStatusAdapter getFileStatus(String key, URI uri, String createSnapshot(String pathStr, String snapshotName) throws IOException; + void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) throws IOException; + void deleteSnapshot(String pathStr, String snapshotName) throws IOException; SnapshotDiffReport getSnapshotDiffReport(Path snapshotDir, String fromSnapshot, String toSnapshot) throws IOException, InterruptedException; - OmKeyInfo recoverFilePrepare(String pathStr, boolean force) throws IOException; + LeaseKeyInfo recoverFilePrepare(String pathStr, boolean force) throws IOException; void recoverFile(OmKeyArgs keyArgs) throws IOException; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java index 4cb9084b2b91..383ad6db495b 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.ozone.OFSPath; import 
org.apache.hadoop.ozone.client.checksum.BaseFileChecksumHelper; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -270,14 +269,14 @@ public static int limitValue(int confValue, String confName, int maxLimit) { } public static void deleteSnapshot(ObjectStore objectStore, - String snapshot, OFSPath snapPath) { + String snapshot, String volumeName, String bucketName) { try { - objectStore.deleteSnapshot(snapPath.getVolumeName(), - snapPath.getBucketName(), snapshot); + objectStore.deleteSnapshot(volumeName, + bucketName, snapshot); } catch (IOException exception) { LOG.warn("Failed to delete the temp snapshot with name {} in bucket" + " {} and volume {} after snapDiff op. Exception : {}", snapshot, - snapPath.getBucketName(), snapPath.getVolumeName(), + bucketName, volumeName, exception.getMessage()); } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index 918640799c71..4dc70bfa569d 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -18,18 +18,24 @@ package org.apache.hadoop.fs.ozone; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.ReadOnlyBufferException; +import io.opentracing.Scope; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.ByteBufferPositionedReadable; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.hdds.tracing.TracingUtil; /** * The input stream for Ozone file system. 
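The hunks below instrument OzoneFSInputStream by opening an OpenTracing span around each read and tagging it with the request offset and length. A minimal sketch of that build/activate/finish pattern, assuming only the standard io.opentracing API; the class and method here are illustrative, not part of the patch:

import io.opentracing.Scope;
import io.opentracing.Span;
import io.opentracing.util.GlobalTracer;

import java.io.IOException;
import java.io.InputStream;

final class TracedReadSketch {
  private TracedReadSketch() { }

  static int tracedRead(InputStream in, byte[] b, int off, int len) throws IOException {
    // One span per operation, named like the spans added in the patch.
    Span span = GlobalTracer.get().buildSpan("OzoneFSInputStream.read").start();
    try (Scope scope = GlobalTracer.get().activateSpan(span)) {
      span.setTag("offset", off).setTag("length", len); // same tags as the patch
      return in.read(b, off, len);                      // the wrapped operation
    } finally {
      span.finish();                                    // span always ends, even on error
    }
  }
}

Simpler call sites in the patch use TracingUtil.executeInNewSpan(name, supplier), which packages this same span lifecycle into a single helper.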
@@ -40,7 +46,7 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class OzoneFSInputStream extends FSInputStream - implements ByteBufferReadable, CanUnbuffer { + implements ByteBufferReadable, CanUnbuffer, ByteBufferPositionedReadable { private final InputStream inputStream; private final Statistics statistics; @@ -52,25 +58,40 @@ public OzoneFSInputStream(InputStream inputStream, Statistics statistics) { @Override public int read() throws IOException { - int byteRead = inputStream.read(); - if (statistics != null && byteRead >= 0) { - statistics.incrementBytesRead(1); + Span span = GlobalTracer.get() + .buildSpan("OzoneFSInputStream.read").start(); + try (Scope scope = GlobalTracer.get().activateSpan(span)) { + int byteRead = inputStream.read(); + if (statistics != null && byteRead >= 0) { + statistics.incrementBytesRead(1); + } + return byteRead; + } finally { + span.finish(); } - return byteRead; } @Override public int read(byte[] b, int off, int len) throws IOException { - int bytesRead = inputStream.read(b, off, len); - if (statistics != null && bytesRead >= 0) { - statistics.incrementBytesRead(bytesRead); + Span span = GlobalTracer.get() + .buildSpan("OzoneFSInputStream.read").start(); + try (Scope scope = GlobalTracer.get().activateSpan(span)) { + span.setTag("offset", off) + .setTag("length", len); + int bytesRead = inputStream.read(b, off, len); + if (statistics != null && bytesRead >= 0) { + statistics.incrementBytesRead(bytesRead); + } + return bytesRead; + } finally { + span.finish(); } - return bytesRead; } @Override public synchronized void close() throws IOException { - inputStream.close(); + TracingUtil.executeInNewSpan("OzoneFSInputStream.close", + inputStream::close); } @Override @@ -101,6 +122,11 @@ public int available() throws IOException { */ @Override public int read(ByteBuffer buf) throws IOException { + return TracingUtil.executeInNewSpan("OzoneFSInputStream.read(ByteBuffer)", + () -> readInTrace(buf)); + } + + private int readInTrace(ByteBuffer buf) throws IOException { if (buf.isReadOnly()) { throw new ReadOnlyBufferException(); } @@ -137,4 +163,49 @@ public void unbuffer() { ((CanUnbuffer) inputStream).unbuffer(); } } + + /** + * @param buf the ByteBuffer to receive the results of the read operation. + * @param position offset + * @return the number of bytes read, possibly zero, or -1 if + * reach end-of-stream + * @throws IOException if there is some error performing the read + */ + @Override + public int read(long position, ByteBuffer buf) throws IOException { + if (!buf.hasRemaining()) { + return 0; + } + long oldPos = this.getPos(); + int bytesRead; + try { + ((Seekable) inputStream).seek(position); + bytesRead = ((ByteBufferReadable) inputStream).read(buf); + } catch (EOFException e) { + // Either position is negative or it has reached EOF + return -1; + } finally { + ((Seekable) inputStream).seek(oldPos); + } + return bytesRead; + } + + /** + * @param buf the ByteBuffer to receive the results of the read operation. 
+ * @param position offset + * @return void + * @throws IOException if there is some error performing the read + * @throws EOFException if end of file reached before reading fully + */ + @Override + public void readFully(long position, ByteBuffer buf) throws IOException { + int bytesRead; + for (int readCount = 0; buf.hasRemaining(); readCount += bytesRead) { + bytesRead = this.read(position + (long)readCount, buf); + if (bytesRead < 0) { + // Still buffer has space to read but stream has already reached EOF + throw new EOFException("End of file reached before reading fully."); + } + } + } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java index 141a40469419..c5f62d6f68ba 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java @@ -18,7 +18,10 @@ package org.apache.hadoop.fs.ozone; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import java.io.IOException; @@ -42,17 +45,24 @@ public OzoneFSOutputStream(OzoneOutputStream outputStream) { @Override public void write(int b) throws IOException { - outputStream.write(b); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.write", + () -> outputStream.write(b)); } @Override public void write(byte[] b, int off, int len) throws IOException { - outputStream.write(b, off, len); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.write", + () -> { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("length", len); + outputStream.write(b, off, len); + }); } @Override public synchronized void flush() throws IOException { - outputStream.flush(); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.flush", + outputStream::flush); } @Override @@ -67,7 +77,8 @@ public void hflush() throws IOException { @Override public void hsync() throws IOException { - outputStream.hsync(); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.hsync", + outputStream::hsync); } protected OzoneOutputStream getWrappedOutputStream() { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzonePathCapabilities.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzonePathCapabilities.java index 5668ff281a9e..4f15d1b62127 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzonePathCapabilities.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzonePathCapabilities.java @@ -44,6 +44,7 @@ public static boolean hasPathCapability(final Path path, case CommonPathCapabilities.FS_ACLS: case CommonPathCapabilities.FS_CHECKSUMS: case CommonPathCapabilities.FS_SNAPSHOTS: + case CommonPathCapabilities.LEASE_RECOVERABLE: return true; default: return false; diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index ce567348114b..8e31b055daa4 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -123,6 +123,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* diff --git a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml index 4c2e4d5b9c8d..4e35e986c155 100644 --- 
a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml @@ -54,6 +54,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* @@ -77,6 +78,7 @@ shade + ${maven.shade.skip} diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index 48573e633803..6c900c56f86a 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -74,6 +74,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index b6cc22bbad09..4de4b22908d7 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -35,12 +35,15 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.security.token.DelegationTokenIssuer; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; /** @@ -142,9 +145,9 @@ public boolean recoverLease(Path f) throws IOException { Path qualifiedPath = makeQualified(f); String key = pathToKey(qualifiedPath); - OmKeyInfo keyInfo = null; + LeaseKeyInfo leaseKeyInfo; try { - keyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); + leaseKeyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.KEY_ALREADY_CLOSED) { // key is already closed, let's just return success @@ -154,25 +157,41 @@ public boolean recoverLease(Path f) throws IOException { } // finalize the final block and get block length - List locationInfoList = keyInfo.getLatestVersionLocations().getLocationList(); + List locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList(); if (!locationInfoList.isEmpty()) { OmKeyLocationInfo block = locationInfoList.get(locationInfoList.size() - 1); try { block.setLength(getAdapter().finalizeBlock(block)); } catch (Throwable e) { - if (!forceRecovery) { + if (e instanceof StorageContainerException && (((StorageContainerException) e).getResult().equals(NO_SUCH_BLOCK) + || ((StorageContainerException) e).getResult().equals(CONTAINER_NOT_FOUND)) + && !leaseKeyInfo.getIsKeyInfo() && locationInfoList.size() > 1) { + locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList().subList(0, + locationInfoList.size() - 1); + block = locationInfoList.get(locationInfoList.size() - 1); + try { + block.setLength(getAdapter().finalizeBlock(block)); + } catch (Throwable exp) { + if (!forceRecovery) { + throw exp; + } + LOG.warn("Failed to finalize block. 
Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, exp); + } + } else if (!forceRecovery) { throw e; + } else { + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, e); } - LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", - FORCE_LEASE_RECOVERY_ENV, e); } } // recover and commit file long keyLength = locationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()).setKeyName(keyInfo.getKeyName()) - .setReplicationConfig(keyInfo.getReplicationConfig()).setDataSize(keyLength) + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(leaseKeyInfo.getKeyInfo().getVolumeName()) + .setBucketName(leaseKeyInfo.getKeyInfo().getBucketName()).setKeyName(leaseKeyInfo.getKeyInfo().getKeyName()) + .setReplicationConfig(leaseKeyInfo.getKeyInfo().getReplicationConfig()).setDataSize(keyLength) .setLocationInfoList(locationInfoList) .build(); getAdapter().recoverFile(keyArgs); diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index e6eba955e4d9..3025b1af03be 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -30,9 +30,10 @@ import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.StorageStatistics; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.security.token.DelegationTokenIssuer; @@ -41,6 +42,8 @@ import java.net.URI; import java.util.List; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; /** @@ -146,9 +149,9 @@ public boolean recoverLease(final Path f) throws IOException { LOG.trace("recoverLease() path:{}", f); Path qualifiedPath = makeQualified(f); String key = pathToKey(qualifiedPath); - OmKeyInfo keyInfo = null; + LeaseKeyInfo leaseKeyInfo; try { - keyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); + leaseKeyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.KEY_ALREADY_CLOSED) { // key is already closed, let's just return success @@ -158,25 +161,41 @@ public boolean recoverLease(final Path f) throws IOException { } // finalize the final block and get block length - List locationInfoList = keyInfo.getLatestVersionLocations().getLocationList(); + List locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList(); if (!locationInfoList.isEmpty()) { OmKeyLocationInfo block = locationInfoList.get(locationInfoList.size() - 1); try { 
block.setLength(getAdapter().finalizeBlock(block)); } catch (Throwable e) { - if (!forceRecovery) { + if (e instanceof StorageContainerException && (((StorageContainerException) e).getResult().equals(NO_SUCH_BLOCK) + || ((StorageContainerException) e).getResult().equals(CONTAINER_NOT_FOUND)) + && !leaseKeyInfo.getIsKeyInfo() && locationInfoList.size() > 1) { + locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList().subList(0, + locationInfoList.size() - 1); + block = locationInfoList.get(locationInfoList.size() - 1); + try { + block.setLength(getAdapter().finalizeBlock(block)); + } catch (Throwable exp) { + if (!forceRecovery) { + throw exp; + } + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, exp); + } + } else if (!forceRecovery) { throw e; + } else { + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, e); } - LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", - FORCE_LEASE_RECOVERY_ENV, e); } } // recover and commit file long keyLength = locationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()).setKeyName(keyInfo.getKeyName()) - .setReplicationConfig(keyInfo.getReplicationConfig()).setDataSize(keyLength) + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(leaseKeyInfo.getKeyInfo().getVolumeName()) + .setBucketName(leaseKeyInfo.getKeyInfo().getBucketName()).setKeyName(leaseKeyInfo.getKeyInfo().getKeyName()) + .setReplicationConfig(leaseKeyInfo.getKeyInfo().getReplicationConfig()).setDataSize(keyLength) .setLocationInfoList(locationInfoList) .build(); getAdapter().recoverFile(keyArgs); diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index 6ff4e3c701a7..417a4f9dca3b 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -91,6 +91,7 @@ shade + ${maven.shade.skip} @@ -128,7 +129,6 @@ org.apache.commons.digester.**.* org.apache.commons.io.**.* org.apache.commons.logging.**.* - org.apache.commons.pool2.**.* org.apache.commons.validator.**.* org.apache.commons.lang3.**.* org.sqlite.**.* diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index b6cc22bbad09..4de4b22908d7 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -35,12 +35,15 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.security.token.DelegationTokenIssuer; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; +import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; /** @@ -142,9 +145,9 @@ public boolean recoverLease(Path f) throws IOException { Path qualifiedPath = makeQualified(f); String key = pathToKey(qualifiedPath); - OmKeyInfo keyInfo = null; + LeaseKeyInfo leaseKeyInfo; try { - keyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); + leaseKeyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.KEY_ALREADY_CLOSED) { // key is already closed, let's just return success @@ -154,25 +157,41 @@ public boolean recoverLease(Path f) throws IOException { } // finalize the final block and get block length - List locationInfoList = keyInfo.getLatestVersionLocations().getLocationList(); + List locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList(); if (!locationInfoList.isEmpty()) { OmKeyLocationInfo block = locationInfoList.get(locationInfoList.size() - 1); try { block.setLength(getAdapter().finalizeBlock(block)); } catch (Throwable e) { - if (!forceRecovery) { + if (e instanceof StorageContainerException && (((StorageContainerException) e).getResult().equals(NO_SUCH_BLOCK) + || ((StorageContainerException) e).getResult().equals(CONTAINER_NOT_FOUND)) + && !leaseKeyInfo.getIsKeyInfo() && locationInfoList.size() > 1) { + locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList().subList(0, + locationInfoList.size() - 1); + block = locationInfoList.get(locationInfoList.size() - 1); + try { + block.setLength(getAdapter().finalizeBlock(block)); + } catch (Throwable exp) { + if (!forceRecovery) { + throw exp; + } + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, exp); + } + } else if (!forceRecovery) { throw e; + } else { + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, e); } - LOG.warn("Failed to finalize block. 
Continue to recover the file since {} is enabled.", - FORCE_LEASE_RECOVERY_ENV, e); } } // recover and commit file long keyLength = locationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()).setKeyName(keyInfo.getKeyName()) - .setReplicationConfig(keyInfo.getReplicationConfig()).setDataSize(keyLength) + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(leaseKeyInfo.getKeyInfo().getVolumeName()) + .setBucketName(leaseKeyInfo.getKeyInfo().getBucketName()).setKeyName(leaseKeyInfo.getKeyInfo().getKeyName()) + .setReplicationConfig(leaseKeyInfo.getKeyInfo().getReplicationConfig()).setDataSize(keyLength) .setLocationInfoList(locationInfoList) .build(); getAdapter().recoverFile(keyArgs); diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index 36aa0e5f27c8..c06a6b7644e8 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -19,20 +19,23 @@ package org.apache.hadoop.fs.ozone; import com.google.common.base.Strings; +import io.opentracing.util.GlobalTracer; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LeaseRecoverable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.SafeMode; import org.apache.hadoop.fs.SafeModeAction; +import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.StorageStatistics; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.security.token.DelegationTokenIssuer; @@ -41,6 +44,8 @@ import java.net.URI; import java.util.List; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; /** @@ -135,13 +140,18 @@ public boolean hasPathCapability(final Path path, final String capability) @Override public boolean recoverLease(final Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs recoverLease", + () -> recoverLeaseTraced(f)); + } + private boolean recoverLeaseTraced(final Path f) throws IOException { + GlobalTracer.get().activeSpan().setTag("path", f.toString()); statistics.incrementWriteOps(1); LOG.trace("recoverLease() path:{}", f); Path qualifiedPath = makeQualified(f); String key = pathToKey(qualifiedPath); - OmKeyInfo keyInfo = null; + 
LeaseKeyInfo leaseKeyInfo; try { - keyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); + leaseKeyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.KEY_ALREADY_CLOSED) { // key is already closed, let's just return success @@ -151,26 +161,42 @@ public boolean recoverLease(final Path f) throws IOException { } // finalize the final block and get block length - List keyLocationInfoList = keyInfo.getLatestVersionLocations().getLocationList(); - if (!keyLocationInfoList.isEmpty()) { - OmKeyLocationInfo block = keyLocationInfoList.get(keyLocationInfoList.size() - 1); + List locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList(); + if (!locationInfoList.isEmpty()) { + OmKeyLocationInfo block = locationInfoList.get(locationInfoList.size() - 1); try { block.setLength(getAdapter().finalizeBlock(block)); } catch (Throwable e) { - if (!forceRecovery) { + if (e instanceof StorageContainerException && (((StorageContainerException) e).getResult().equals(NO_SUCH_BLOCK) + || ((StorageContainerException) e).getResult().equals(CONTAINER_NOT_FOUND)) + && !leaseKeyInfo.getIsKeyInfo() && locationInfoList.size() > 1) { + locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList().subList(0, + locationInfoList.size() - 1); + block = locationInfoList.get(locationInfoList.size() - 1); + try { + block.setLength(getAdapter().finalizeBlock(block)); + } catch (Throwable exp) { + if (!forceRecovery) { + throw exp; + } + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, exp); + } + } else if (!forceRecovery) { throw e; + } else { + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, e); } - LOG.warn("Failed to finalize block. 
Continue to recover the file since {} is enabled.", - FORCE_LEASE_RECOVERY_ENV, e); } } // recover and commit file - long keyLength = keyLocationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()).setKeyName(keyInfo.getKeyName()) - .setReplicationConfig(keyInfo.getReplicationConfig()).setDataSize(keyLength) - .setLocationInfoList(keyLocationInfoList) + long keyLength = locationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(leaseKeyInfo.getKeyInfo().getVolumeName()) + .setBucketName(leaseKeyInfo.getKeyInfo().getBucketName()).setKeyName(leaseKeyInfo.getKeyInfo().getKeyName()) + .setReplicationConfig(leaseKeyInfo.getKeyInfo().getReplicationConfig()).setDataSize(keyLength) + .setLocationInfoList(locationInfoList) .build(); getAdapter().recoverFile(keyArgs); return true; @@ -178,6 +204,12 @@ public boolean recoverLease(final Path f) throws IOException { @Override public boolean isFileClosed(Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs isFileClosed", + () -> isFileClosedTraced(f)); + } + + private boolean isFileClosedTraced(Path f) throws IOException { + GlobalTracer.get().activeSpan().setTag("path", f.toString()); statistics.incrementWriteOps(1); LOG.trace("isFileClosed() path:{}", f); Path qualifiedPath = makeQualified(f); diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 4b61b37bc40c..059db6b95130 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -305,66 +305,6 @@ - - org.apache.rat - apache-rat-plugin - - - **/*.json - **/hs_err*.log - **/target/** - .gitattributes - **/.attach_* - **/**.rej - **/.factorypath - public - **/*.iml - **/output.xml - **/log.html - **/report.html - **/.idea/** - **/.ssh/id_rsa* - dev-support/*tests - dev-support/checkstyle* - dev-support/jdiff/** - src/contrib/** - src/main/webapps/datanode/robots.txt - src/main/webapps/hdfs/robots.txt - src/main/webapps/journal/robots.txt - src/main/webapps/router/robots.txt - src/main/webapps/secondary/robots.txt - src/site/resources/images/* - src/test/all-tests - src/test/empty-file - src/test/resources/*.log - src/test/resources/*.tgz - src/test/resources/data* - src/test/resources/empty-file - src/test/resources/ssl/* - src/main/compose/ozonesecure/docker-image/runner/build/apache-rat-0.12/README-CLI.txt - src/main/compose/ozonesecure/docker-image/runner/build/apache-rat-0.12/README-ANT.txt - webapps/static/angular-1.8.0.min.js - webapps/static/angular-nvd3-1.0.9.min.js - webapps/static/angular-route-1.8.0.min.js - webapps/static/bootstrap-3.4.1/** - webapps/static/d3-3.5.17.min.js - webapps/static/jquery-3.5.1.min.js - webapps/static/jquery.dataTables.min.js - webapps/static/nvd3-1.8.5.min.css.map - webapps/static/nvd3-1.8.5.min.css - webapps/static/nvd3-1.8.5.min.js.map - webapps/static/nvd3-1.8.5.min.js - **/dependency-reduced-pom.xml - **/node_modules/** - **/yarn.lock - **/pnpm-lock.yaml - **/ozone-recon-web/build/** - src/test/resources/prometheus-test-response.txt - src/main/license/** - src/main/resources/proto.lock - - - org.apache.maven.plugins maven-jar-plugin @@ -406,6 +346,13 @@ ozonefs-hadoop3-client + + go-offline + + ozonefs-shaded + ozonefs-hadoop2 + + build-with-recon diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java 
b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 43e2d728b763..4d62ca886cda 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -51,7 +51,8 @@ public enum UnHealthyContainerStates { UNDER_REPLICATED, OVER_REPLICATED, MIS_REPLICATED, - ALL_REPLICAS_UNHEALTHY + ALL_REPLICAS_UNHEALTHY, + NEGATIVE_SIZE // Added new state to track containers with negative sizes } private static final String CONTAINER_ID = "container_id"; diff --git a/hadoop-ozone/recon/.gitignore b/hadoop-ozone/recon/.gitignore new file mode 100644 index 000000000000..3c3629e647f5 --- /dev/null +++ b/hadoop-ozone/recon/.gitignore @@ -0,0 +1 @@ +node_modules diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index 134092146e54..9c79a869c41d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -36,6 +36,11 @@ private ReconConstants() { public static final String RECON_SCM_SNAPSHOT_DB = "scm.snapshot.db"; // By default, limit the number of results returned + + /** + * The maximum number of top disk usage records to return in a /du response. + */ + public static final int DISK_USAGE_TOP_RECORDS_LIMIT = 30; public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false"; public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false"; public static final String DEFAULT_FETCH_COUNT = "1000"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java new file mode 100644 index 000000000000..63fdae252aa6 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.recon; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import javax.inject.Singleton; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * ReconContext is the single source of truth for some key information shared + * across multiple modules within Recon, including: + * 1) ReconNodeManager + * 2) OzoneManagerServiceProviderImpl + * + * If current Recon is not healthy, isHealthy check will return true/false accordingly. + * UnHealthyModuleErrorMap will maintain the error info for the module failed to load. + */ +@Singleton +public final class ReconContext { + private static final Logger LOG = LoggerFactory.getLogger(ReconContext.class); + + private final String threadNamePrefix; + private String clusterId; + private OzoneConfiguration ozoneConfiguration; + private ReconUtils reconUtils; + private AtomicBoolean isHealthy = new AtomicBoolean(true); + + private Map errCodeMsgMap = new HashMap<>(); + private Map> errCodeImpactMap = new HashMap<>(); + private List errors = Collections.synchronizedList(new ArrayList<>()); + + @Inject + public ReconContext(OzoneConfiguration configuration, ReconUtils reconUtils) { + this.reconUtils = reconUtils; + this.ozoneConfiguration = configuration; + threadNamePrefix = reconUtils.getReconNodeDetails(configuration).threadNamePrefix(); + initializeErrCodeMetaData(); + } + + private void initializeErrCodeMetaData() { + for (ErrorCode errorCode : ErrorCode.values()) { + errCodeMsgMap.put(errorCode, errorCode.getMessage()); + errCodeImpactMap.put(errorCode, errorCode.getImpacts()); + } + } + + /** + * @param healthStatus : update Health Status of Recon. + */ + public void updateHealthStatus(AtomicBoolean healthStatus) { + boolean oldHealthStatus = isHealthy.getAndSet(healthStatus.get()); + LOG.info("Update healthStatus of Recon from {} to {}.", oldHealthStatus, isHealthy.get()); + } + + public AtomicBoolean isHealthy() { + return isHealthy; + } + + public String threadNamePrefix() { + return threadNamePrefix; + } + + public Map getErrCodeMsgMap() { + return errCodeMsgMap; + } + + public Map> getErrCodeImpactMap() { + return errCodeImpactMap; + } + + public List getErrors() { + return errors; + } + + public void updateErrors(ErrorCode errorCode) { + errors.add(errorCode); + } + + public void setClusterId(String clusterId) { + this.clusterId = clusterId; + } + + public String getClusterId() { + return clusterId; + } + + /** + * Error codes to make it easy to decode these errors in Recon. + */ + public enum ErrorCode { + OK("Recon is healthy !!!", Collections.emptyList()), + INVALID_NETWORK_TOPOLOGY( + "Invalid network topology of datanodes. Failed to register and load datanodes. Pipelines may not be " + + "healthy !!!", Arrays.asList("Datanodes", "Pipelines")), + CERTIFICATE_INIT_FAILED( + "Error during initializing Recon certificate !!!", + Arrays.asList("Initializing secure Recon")), + INTERNAL_ERROR( + "Unexpected internal error. 
Kindly refer ozone-recon.log file for details !!!", + Arrays.asList("Recon health")), + GET_OM_DB_SNAPSHOT_FAILED( + "OM DB Snapshot sync failed !!!", + Arrays.asList("Overview (OM Data)", "OM DB Insights")); + + private final String message; + private final List impacts; + + ErrorCode(String message, List impacts) { + this.message = message; + this.impacts = impacts; + } + + public String getMessage() { + return message; + } + + public List getImpacts() { + return impacts; + } + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java index bb7ba4954d71..39f41395bc89 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java @@ -103,6 +103,7 @@ protected void configure() { bind(OzoneManagerServiceProvider.class) .to(OzoneManagerServiceProviderImpl.class).in(Singleton.class); bind(ReconUtils.class).in(Singleton.class); + bind(ReconContext.class).in(Singleton.class); // Persistence - inject configuration provider install(new JooqPersistenceModule( getProvider(DataSourceConfiguration.class))); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index ab87bda4412c..5c9e40396358 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -185,6 +185,7 @@ public final class ReconServerConfigKeys { public static final int OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT = 3; + /** * Private constructor for utility class. 
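An illustrative sketch, not part of the patch: how a Recon component might report a failure through the ReconContext singleton bound above. The enclosing task class, the injected reconContext field and the syncSnapshot() call are hypothetical; only updateErrors, updateHealthStatus and ErrorCode.GET_OM_DB_SNAPSHOT_FAILED come from this change.

  // Hypothetical task method; requires java.io.IOException and
  // java.util.concurrent.atomic.AtomicBoolean imports.
  private void syncOmSnapshotSafely() {
    try {
      syncSnapshot();  // hypothetical OM DB snapshot fetch, assumed to throw IOException
      reconContext.updateHealthStatus(new AtomicBoolean(true));
    } catch (IOException e) {
      reconContext.updateErrors(ReconContext.ErrorCode.GET_OM_DB_SNAPSHOT_FAILED);
      reconContext.updateHealthStatus(new AtomicBoolean(false));
    }
  }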
*/ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 39d091ee03c8..76b601b1c0eb 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -32,6 +32,10 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import com.google.common.base.Preconditions; import com.google.inject.Singleton; @@ -54,16 +58,24 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT; import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig; import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; import static org.jooq.impl.DSL.currentTimestamp; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; import jakarta.annotation.Nonnull; +import com.google.common.annotations.VisibleForTesting; import org.jooq.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,9 +91,11 @@ public class ReconUtils { public ReconUtils() { } - private static final Logger LOG = LoggerFactory.getLogger( + private static Logger log = LoggerFactory.getLogger( ReconUtils.class); + private static AtomicBoolean rebuildTriggered = new AtomicBoolean(false); + public static File getReconScmDbDir(ConfigurationSource conf) { return new ReconUtils().getReconDbDir(conf, OZONE_RECON_SCM_DB_DIR); } @@ -121,7 +135,7 @@ public File getReconDbDir(ConfigurationSource conf, String dirConfigKey) { return metadataDir; } - LOG.warn("{} is not configured. We recommend adding this setting. " + + log.warn("{} is not configured. We recommend adding this setting. 
" + "Falling back to {} instead.", dirConfigKey, HddsConfigKeys.OZONE_METADATA_DIRS); return getOzoneMetaDirPath(conf); @@ -156,7 +170,7 @@ public static File createTarFile(Path sourcePath) throws IOException { org.apache.hadoop.io.IOUtils.closeStream(tarOs); org.apache.hadoop.io.IOUtils.closeStream(fileOutputStream); } catch (Exception e) { - LOG.error("Exception encountered when closing " + + log.error("Exception encountered when closing " + "TAR file output stream: " + e); } } @@ -221,7 +235,7 @@ public void untarCheckpointFile(File tarFile, Path destPath) if (entry.isDirectory()) { boolean success = f.mkdirs(); if (!success) { - LOG.error("Unable to create directory found in tar."); + log.error("Unable to create directory found in tar."); } } else { //Write contents of file in archive to a new file. @@ -244,25 +258,103 @@ public void untarCheckpointFile(File tarFile, Path destPath) } } + + /** + * Constructs the full path of a key from its OmKeyInfo using a bottom-up approach, starting from the leaf node. + * + * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched + * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from + * bottom to top, finally prepending the volume and bucket names to complete the full path. If the directory structure + * is currently being rebuilt (indicated by the rebuildTriggered flag), this method returns an empty string to signify + * that path construction is temporarily unavailable. + * + * @param omKeyInfo The OmKeyInfo object for the key + * @return The constructed full path of the key as a String, or an empty string if a rebuild is in progress and + * the path cannot be constructed at this time. + * @throws IOException + */ + public static String constructFullPath(OmKeyInfo omKeyInfo, + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager) + throws IOException { + + StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); + long parentId = omKeyInfo.getParentObjectID(); + boolean isDirectoryPresent = false; + + while (parentId != 0) { + NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId); + if (nsSummary == null) { + log.warn("NSSummary tree is currently being rebuilt or the directory could be in the progress of " + + "deletion, returning empty string for path construction."); + return ""; + } + if (nsSummary.getParentId() == -1) { + if (rebuildTriggered.compareAndSet(false, true)) { + triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); + } + log.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); + return ""; + } + fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); + + // Move to the parent ID of the current directory + parentId = nsSummary.getParentId(); + isDirectoryPresent = true; + } + + // Prepend the volume and bucket to the constructed path + String volumeName = omKeyInfo.getVolumeName(); + String bucketName = omKeyInfo.getBucketName(); + fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX); + if (isDirectoryPresent) { + return OmUtils.normalizeKey(fullPath.toString(), true); + } + return fullPath.toString(); + } + + private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager) { + ExecutorService executor = Executors.newSingleThreadExecutor(r -> { + Thread t = new Thread(r); + 
t.setName("RebuildNSSummaryThread"); + return t; + }); + + executor.submit(() -> { + long startTime = System.currentTimeMillis(); + log.info("Rebuilding NSSummary tree..."); + try { + reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); + } finally { + long endTime = System.currentTimeMillis(); + log.info("NSSummary tree rebuild completed in {} ms.", endTime - startTime); + } + }); + executor.shutdown(); + } + /** * Make HTTP GET call on the URL and return HttpURLConnection instance. + * * @param connectionFactory URLConnectionFactory to use. - * @param url url to call - * @param isSpnego is SPNEGO enabled + * @param url url to call + * @param isSpnego is SPNEGO enabled * @return HttpURLConnection instance of the HTTP call. * @throws IOException, AuthenticationException While reading the response. */ public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory, - String url, boolean isSpnego) + String url, boolean isSpnego) throws IOException, AuthenticationException { HttpURLConnection urlConnection = (HttpURLConnection) - connectionFactory.openConnection(new URL(url), isSpnego); + connectionFactory.openConnection(new URL(url), isSpnego); urlConnection.connect(); return urlConnection; } /** * Load last known DB in Recon. + * * @param reconDbDir * @param fileNamePrefix * @return @@ -287,7 +379,7 @@ public File getLastKnownDB(File reconDbDir, String fileNamePrefix) { lastKnownSnapshotFileName = fileName; } } catch (NumberFormatException nfEx) { - LOG.warn("Unknown file found in Recon DB dir : {}", fileName); + log.warn("Unknown file found in Recon DB dir : {}", fileName); } } } @@ -322,6 +414,33 @@ public static void upsertGlobalStatsTable(Configuration sqlConfiguration, } } + /** + * Sorts a list of DiskUsage objects in descending order by size using parallel sorting and + * returns the top N records as specified by the limit. + * + * This method is optimized for large datasets and utilizes parallel processing to efficiently + * sort and retrieve the top N largest records by size. It's especially useful for reducing + * processing time and memory usage when only a subset of sorted records is needed. + * + * Advantages of this approach include: + * - Efficient handling of large datasets by leveraging multi-core processors. + * - Reduction in memory usage and improvement in processing time by limiting the + * number of returned records. + * - Scalability and easy integration with existing systems. + * + * @param diskUsageList the list of DiskUsage objects to be sorted. + * @param limit the maximum number of DiskUsage objects to return. + * @return a list of the top N DiskUsage objects sorted in descending order by size, + * where N is the specified limit. 
+ */ + public static List sortDiskUsageDescendingWithLimit( + List diskUsageList, int limit) { + return diskUsageList.parallelStream() + .sorted((du1, du2) -> Long.compare(du2.getSize(), du1.getSize())) + .limit(limit) + .collect(Collectors.toList()); + } + public static long getFileSizeUpperBound(long fileSize) { if (fileSize >= ReconConstants.MAX_FILE_SIZE_UPPER_BOUND) { return Long.MAX_VALUE; @@ -385,4 +504,9 @@ public SCMNodeDetails getReconNodeDetails(OzoneConfiguration conf) { HddsServerUtil.getReconDataNodeBindAddress(conf)); return builder.build(); } + + @VisibleForTesting + public static void setLogger(Logger logger) { + log = logger; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index d838e9c36e57..86ef6c022d57 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.types.ContainerDiscrepancyInfo; import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; @@ -94,10 +95,7 @@ @AdminOnly public class ContainerEndpoint { - @Inject private ReconContainerMetadataManager reconContainerMetadataManager; - - @Inject private ReconOMMetadataManager omMetadataManager; private final ReconContainerManager containerManager; @@ -144,33 +142,38 @@ public static DataFilter fromValue(String value) { @Inject public ContainerEndpoint(OzoneStorageContainerManager reconSCM, - ContainerHealthSchemaManager containerHealthSchemaManager, - ReconNamespaceSummaryManager reconNamespaceSummaryManager) { + ContainerHealthSchemaManager containerHealthSchemaManager, + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconContainerMetadataManager reconContainerMetadataManager, + ReconOMMetadataManager omMetadataManager) { this.containerManager = (ReconContainerManager) reconSCM.getContainerManager(); this.pipelineManager = reconSCM.getPipelineManager(); this.containerHealthSchemaManager = containerHealthSchemaManager; this.reconNamespaceSummaryManager = reconNamespaceSummaryManager; this.reconSCM = reconSCM; + this.reconContainerMetadataManager = reconContainerMetadataManager; + this.omMetadataManager = omMetadataManager; } /** * Return @{@link org.apache.hadoop.hdds.scm.container} * for the containers starting from the given "prev-key" query param for the * given "limit". The given "prev-key" is skipped from the results returned. + * * @param prevKey the containerID after which results are returned. * start containerID, >=0, * start searching at the head if 0. - * @param limit max no. of containers to get. - * count must be >= 0 - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big. + * @param limit max no. of containers to get. + * count must be >= 0 + * Usually the count will be replace with a very big + * value instead of being unlimited in case the db is very big. 
* @return {@link Response} */ @GET public Response getContainers( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, + int limit, @DefaultValue(PREV_CONTAINER_ID_DEFAULT_VALUE) @QueryParam(RECON_QUERY_PREVKEY) long prevKey) { if (limit < 0 || prevKey < 0) { @@ -212,8 +215,8 @@ public Response getContainers( * starting from the given "prev-key" query param for the given "limit". * The given prevKeyPrefix is skipped from the results returned. * - * @param containerID the given containerID. - * @param limit max no. of keys to get. + * @param containerID the given containerID. + * @param limit max no. of keys to get. * @param prevKeyPrefix the key prefix after which results are returned. * @return {@link Response} */ @@ -226,7 +229,12 @@ public Response getKeysForContainer( @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKeyPrefix) { Map keyMetadataMap = new LinkedHashMap<>(); + + // Total count of keys in the container. long totalCount; + // Last key prefix to be used for pagination. It will be exposed in the response. + String lastKey = ""; + try { Map containerKeyPrefixMap = reconContainerMetadataManager.getKeyPrefixesForContainer(containerID, @@ -263,6 +271,7 @@ public Response getKeysForContainer( omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); + lastKey = ozoneKey; if (keyMetadataMap.containsKey(ozoneKey)) { keyMetadataMap.get(ozoneKey).getVersions() .add(containerKeyPrefix.getKeyVersion()); @@ -278,6 +287,8 @@ public Response getKeysForContainer( keyMetadata.setBucket(omKeyInfo.getBucketName()); keyMetadata.setVolume(omKeyInfo.getVolumeName()); keyMetadata.setKey(omKeyInfo.getKeyName()); + keyMetadata.setCompletePath(ReconUtils.constructFullPath(omKeyInfo, + reconNamespaceSummaryManager, omMetadataManager)); keyMetadata.setCreationTime( Instant.ofEpochMilli(omKeyInfo.getCreationTime())); keyMetadata.setModificationTime( @@ -298,7 +309,7 @@ public Response getKeysForContainer( Response.Status.INTERNAL_SERVER_ERROR); } KeysResponse keysResponse = - new KeysResponse(totalCount, keyMetadataMap.values()); + new KeysResponse(totalCount, keyMetadataMap.values(), lastKey); return Response.ok(keysResponse).build(); } @@ -334,7 +345,7 @@ public Response getMissingContainers( ) { List missingContainers = new ArrayList<>(); containerHealthSchemaManager.getUnhealthyContainers( - UnHealthyContainerStates.MISSING, 0, limit) + UnHealthyContainerStates.MISSING, 0, limit) .forEach(container -> { long containerID = container.getContainerId(); try { @@ -378,7 +389,7 @@ public Response getMissingContainers( public Response getUnhealthyContainers( @PathParam("state") String state, @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, + int limit, @DefaultValue(DEFAULT_BATCH_NUMBER) @QueryParam(RECON_QUERY_BATCH_PARAM) int batchNum) { int offset = Math.max(((batchNum - 1) * limit), 0); @@ -397,7 +408,13 @@ public Response getUnhealthyContainers( summary = containerHealthSchemaManager.getUnhealthyContainersSummary(); List containers = containerHealthSchemaManager .getUnhealthyContainers(internalState, offset, limit); - for (UnhealthyContainers c : containers) { + List emptyMissingFiltered = containers.stream() + .filter( + container -> !container.getContainerState() + .equals(UnHealthyContainerStates.EMPTY_MISSING.toString())) + .collect( + Collectors.toList()); + for (UnhealthyContainers c : emptyMissingFiltered) { long containerID = c.getContainerId(); ContainerInfo containerInfo = 
containerManager.getContainer(ContainerID.valueOf(containerID)); @@ -428,7 +445,6 @@ public Response getUnhealthyContainers( * Return * {@link org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata} * for all unhealthy containers. - * @param limit The limit of unhealthy containers to return. * @param batchNum The batch number (like "page number") of results to return. * Passing 1, will return records 1 to limit. 2 will return @@ -439,7 +455,7 @@ public Response getUnhealthyContainers( @Path("/unhealthy") public Response getUnhealthyContainers( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, + int limit, @DefaultValue(DEFAULT_BATCH_NUMBER) @QueryParam(RECON_QUERY_BATCH_PARAM) int batchNum) { return getUnhealthyContainers(null, limit, batchNum); @@ -514,6 +530,7 @@ public Response getSCMDeletedContainers( /** * Helper function to extract the blocks for a given container from a given * OM Key. + * * @param matchedKeys List of OM Key Info locations * @param containerID containerId. * @return List of blocks. @@ -698,7 +715,8 @@ public Response getContainerMisMatchInsights( } - /** This API retrieves set of deleted containers in SCM which are present + /** + * This API retrieves set of deleted containers in SCM which are present * in OM to find out list of keys mapped to such DELETED state containers. * * limit - limits the number of such SCM DELETED containers present in OM. diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java index 5b104c461158..71040b9fdf64 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java @@ -101,6 +101,8 @@ public Response getBasicInfo( * @param path request path * @param listFile show subpath/disk usage for each key * @param withReplica count actual DU with replication + * @param sortSubpaths determines whether to sort the subpaths by their sizes in descending order + * and returns the N largest subpaths based on the configuration value DISK_USAGE_TOP_RECORDS_LIMIT. 
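An illustrative sketch, not part of the patch: exercising the new sortSubPaths query parameter. The host, port and /api/v1/namespace prefix are assumptions about a typical Recon deployment; only the parameter names come from this change.

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

final class DiskUsageQuerySketch {
  // Returns the HTTP status; a 200 response carries a DUResponse JSON body whose
  // subpaths are limited to the top DISK_USAGE_TOP_RECORDS_LIMIT entries when
  // sortSubPaths=true (the default).
  static int queryDiskUsage(String reconHttpAddress, String path) throws IOException {
    URL url = new URL(reconHttpAddress + "/api/v1/namespace/du"
        + "?path=" + path + "&files=true&replica=false&sortSubPaths=true");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    return conn.getResponseCode();
  }
}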
* @return DU response * @throws IOException */ @@ -108,10 +110,9 @@ public Response getBasicInfo( @Path("/du") @SuppressWarnings("methodlength") public Response getDiskUsage(@QueryParam("path") String path, - @DefaultValue("false") - @QueryParam("files") boolean listFile, - @DefaultValue("false") - @QueryParam("replica") boolean withReplica) + @DefaultValue("false") @QueryParam("files") boolean listFile, + @DefaultValue("false") @QueryParam("replica") boolean withReplica, + @DefaultValue("true") @QueryParam("sortSubPaths") boolean sortSubpaths) throws IOException { if (path == null || path.length() == 0) { return Response.status(Response.Status.BAD_REQUEST).build(); @@ -127,8 +128,7 @@ public Response getDiskUsage(@QueryParam("path") String path, reconNamespaceSummaryManager, omMetadataManager, reconSCM, path); - duResponse = handler.getDuResponse( - listFile, withReplica); + duResponse = handler.getDuResponse(listFile, withReplica, sortSubpaths); return Response.ok(duResponse).build(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 968bfbc46343..a0bcfd302554 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -18,40 +18,64 @@ package org.apache.hadoop.ozone.recon.api; +import com.fasterxml.jackson.databind.JsonNode; +import com.google.common.base.Preconditions; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.client.DecommissionUtils; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.DatanodeInfo; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata; import org.apache.hadoop.ozone.recon.api.types.DatanodePipeline; import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport; import org.apache.hadoop.ozone.recon.api.types.DatanodesResponse; +import org.apache.hadoop.ozone.recon.api.types.RemoveDataNodesResponseWrapper; import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import javax.inject.Inject; +import javax.ws.rs.Consumes; import javax.ws.rs.GET; +import javax.ws.rs.PUT; import javax.ws.rs.Path; import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import java.io.IOException; import 
java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; + /** * Endpoint to fetch details about datanodes. */ @@ -65,14 +89,18 @@ public class NodeEndpoint { private ReconNodeManager nodeManager; private ReconPipelineManager pipelineManager; private ReconContainerManager reconContainerManager; + private StorageContainerLocationProtocol scmClient; + private String errorMessage = "Error getting pipeline and container metrics for "; @Inject - NodeEndpoint(OzoneStorageContainerManager reconSCM) { + NodeEndpoint(OzoneStorageContainerManager reconSCM, + StorageContainerLocationProtocol scmClient) { this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager(); - this.reconContainerManager = + this.reconContainerManager = (ReconContainerManager) reconSCM.getContainerManager(); this.pipelineManager = (ReconPipelineManager) reconSCM.getPipelineManager(); + this.scmClient = scmClient; } /** @@ -171,4 +199,253 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) { long committed = nodeStat.getCommitted().get(); return new DatanodeStorageReport(capacity, used, remaining, committed); } + + /** + * Removes datanodes from Recon's memory and nodes table in Recon DB. + * @param uuids the list of datanode uuid's + * + * @return JSON response with failed, not found and successfully removed datanodes list. 
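An illustrative sketch, not part of the patch: a minimal client call against the datanode-remove endpoint added here. The host, port and /api/v1/datanodes prefix are assumptions; the PUT verb, the /remove sub-path and the JSON list-of-UUIDs body follow the endpoint definition in this patch.

import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

final class RemoveDatanodesSketch {
  // Returns the HTTP status; the response body wraps removedDatanodes,
  // notFoundDatanodes and failedDatanodes lists.
  static int removeDatanodes(String reconHttpAddress, String... uuids) throws IOException {
    URL url = new URL(reconHttpAddress + "/api/v1/datanodes/remove");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);
    String body = "[\"" + String.join("\",\"", uuids) + "\"]";
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body.getBytes(StandardCharsets.UTF_8));
    }
    return conn.getResponseCode();
  }
}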
+ */ + @PUT + @Path("/remove") + @Consumes(MediaType.APPLICATION_JSON) + public Response removeDatanodes(List uuids) { + List failedDatanodes = new ArrayList<>(); + List notFoundDatanodes = new ArrayList<>(); + List removedDatanodes = new ArrayList<>(); + Map failedNodeErrorResponseMap = new HashMap<>(); + + Preconditions.checkNotNull(uuids, "Datanode list argument should not be null"); + Preconditions.checkArgument(!uuids.isEmpty(), "Datanode list argument should not be empty"); + try { + for (String uuid : uuids) { + DatanodeDetails nodeByUuid = nodeManager.getNodeByUuid(uuid); + try { + if (preChecksSuccess(nodeByUuid, failedNodeErrorResponseMap)) { + removedDatanodes.add(DatanodeMetadata.newBuilder() + .withHostname(nodeManager.getHostName(nodeByUuid)) + .withUUid(uuid) + .withState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) + .build()); + nodeManager.removeNode(nodeByUuid); + LOG.info("Node {} removed successfully !!!", uuid); + } else { + failedDatanodes.add(DatanodeMetadata.newBuilder() + .withHostname(nodeManager.getHostName(nodeByUuid)) + .withUUid(uuid) + .withOperationalState(nodeByUuid.getPersistedOpState()) + .withState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) + .build()); + } + } catch (NodeNotFoundException nnfe) { + LOG.error("Selected node {} not found : {} ", uuid, nnfe); + notFoundDatanodes.add(DatanodeMetadata.newBuilder() + .withHostname("") + .withState(NodeState.DEAD) + .withUUid(uuid).build()); + } + } + } catch (Exception exp) { + LOG.error("Unexpected Error while removing datanodes : {} ", exp); + throw new WebApplicationException(exp, Response.Status.INTERNAL_SERVER_ERROR); + } + + RemoveDataNodesResponseWrapper removeDataNodesResponseWrapper = new RemoveDataNodesResponseWrapper(); + + if (!failedDatanodes.isEmpty()) { + DatanodesResponse failedNodesResp = + new DatanodesResponse(failedDatanodes.size(), Collections.emptyList()); + failedNodesResp.setFailedNodeErrorResponseMap(failedNodeErrorResponseMap); + removeDataNodesResponseWrapper.getDatanodesResponseMap().put("failedDatanodes", failedNodesResp); + } + + if (!notFoundDatanodes.isEmpty()) { + DatanodesResponse notFoundNodesResp = + new DatanodesResponse(notFoundDatanodes.size(), notFoundDatanodes); + removeDataNodesResponseWrapper.getDatanodesResponseMap().put("notFoundDatanodes", notFoundNodesResp); + } + + if (!removedDatanodes.isEmpty()) { + DatanodesResponse removedNodesResp = + new DatanodesResponse(removedDatanodes.size(), removedDatanodes); + removeDataNodesResponseWrapper.getDatanodesResponseMap().put("removedDatanodes", removedNodesResp); + } + return Response.ok(removeDataNodesResponseWrapper).build(); + } + + private boolean preChecksSuccess(DatanodeDetails nodeByUuid, Map failedNodeErrorResponseMap) + throws NodeNotFoundException { + if (null == nodeByUuid) { + throw new NodeNotFoundException("Node not found !!!"); + } + NodeStatus nodeStatus = null; + AtomicBoolean isContainerOrPipeLineOpen = new AtomicBoolean(false); + try { + nodeStatus = nodeManager.getNodeStatus(nodeByUuid); + boolean isNodeDecommissioned = nodeByUuid.getPersistedOpState() == NodeOperationalState.DECOMMISSIONED; + if (isNodeDecommissioned || nodeStatus.isDead()) { + checkContainers(nodeByUuid, isContainerOrPipeLineOpen); + if (isContainerOrPipeLineOpen.get()) { + failedNodeErrorResponseMap.put(nodeByUuid.getUuidString(), "Open Containers/Pipelines"); + return false; + } + checkPipelines(nodeByUuid, isContainerOrPipeLineOpen); + if (isContainerOrPipeLineOpen.get()) { + 
failedNodeErrorResponseMap.put(nodeByUuid.getUuidString(), "Open Containers/Pipelines"); + return false; + } + return true; + } + } catch (NodeNotFoundException e) { + LOG.error("Node : {} not found", nodeByUuid); + return false; + } + failedNodeErrorResponseMap.put(nodeByUuid.getUuidString(), "DataNode should be in either DECOMMISSIONED " + + "operational state or DEAD node state."); + return false; + } + + private void checkPipelines(DatanodeDetails nodeByUuid, AtomicBoolean isContainerOrPipeLineOpen) { + nodeManager.getPipelines(nodeByUuid) + .forEach(id -> { + try { + final Pipeline pipeline = pipelineManager.getPipeline(id); + if (pipeline.isOpen()) { + LOG.warn("Pipeline : {} is still open for datanode: {}, pre-check failed, datanode not eligible " + + "for remove.", id.getId(), nodeByUuid.getUuid()); + isContainerOrPipeLineOpen.set(true); + return; + } + } catch (PipelineNotFoundException pipelineNotFoundException) { + LOG.warn("Pipeline {} is not managed by PipelineManager.", id, pipelineNotFoundException); + } + }); + } + + private void checkContainers(DatanodeDetails nodeByUuid, AtomicBoolean isContainerOrPipeLineOpen) + throws NodeNotFoundException { + nodeManager.getContainers(nodeByUuid) + .forEach(id -> { + try { + final ContainerInfo container = reconContainerManager.getContainer(id); + if (container.getState() == HddsProtos.LifeCycleState.OPEN) { + LOG.warn("Container : {} is still open for datanode: {}, pre-check failed, datanode not eligible " + + "for remove.", container.getContainerID(), nodeByUuid.getUuid()); + isContainerOrPipeLineOpen.set(true); + return; + } + } catch (ContainerNotFoundException cnfe) { + LOG.warn("Container {} is not managed by ContainerManager.", + id, cnfe); + } + }); + } + + /** + * This GET API provides the information of all datanodes for which decommissioning is initiated. + * @return the wrapped Response output + */ + @GET + @Path("/decommission/info") + public Response getDatanodesDecommissionInfo() { + try { + return getDecommissionStatusResponse(null, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * This GET API provides the information of a specific datanode for which decommissioning is initiated. + * API accepts both uuid or ipAddress, uuid will be given preference if both provided. 
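The pre-check above reduces to a single rule: a datanode may be removed from Recon's view only when it is already out of service (persisted operational state DECOMMISSIONED, or DEAD health) and nothing OPEN still references it. A minimal, self-contained restatement of that rule, using only the HDDS enums referenced in this patch; the class and method names below are illustrative, not part of the change:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;

/** Illustrative restatement of the removeDatanodes pre-check. */
final class RemovalEligibility {
  private RemovalEligibility() { }

  // Eligible only when the node is DECOMMISSIONED or DEAD and has no OPEN
  // containers or pipelines left, mirroring preChecksSuccess() above.
  static boolean isEligibleForRemoval(NodeOperationalState opState,
      NodeState health, boolean hasOpenContainers, boolean hasOpenPipelines) {
    boolean outOfService =
        opState == NodeOperationalState.DECOMMISSIONED || health == NodeState.DEAD;
    return outOfService && !hasOpenContainers && !hasOpenPipelines;
  }
}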
+ * @return the wrapped Response output + */ + @GET + @Path("/decommission/info/datanode") + public Response getDecommissionInfoForDatanode(@QueryParam("uuid") String uuid, + @QueryParam("ipAddress") String ipAddress) { + if (StringUtils.isEmpty(uuid)) { + Preconditions.checkNotNull(ipAddress, "Either uuid or ipAddress of a datanode should be provided !!!"); + Preconditions.checkArgument(!ipAddress.isEmpty(), + "Either uuid or ipAddress of a datanode should be provided !!!"); + } + try { + return getDecommissionStatusResponse(uuid, ipAddress); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private Response getDecommissionStatusResponse(String uuid, String ipAddress) throws IOException { + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + Map responseMap = new HashMap<>(); + Stream allNodes = scmClient.queryNode(DECOMMISSIONING, + null, HddsProtos.QueryScope.CLUSTER, "", ClientVersion.CURRENT_VERSION).stream(); + List decommissioningNodes = + DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress); + String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + int numDecomNodes = -1; + JsonNode jsonNode = null; + if (metricsJson != null) { + jsonNode = DecommissionUtils.getBeansJsonNode(metricsJson); + numDecomNodes = DecommissionUtils.getNumDecomNodes(jsonNode); + } + List> dnDecommissionInfo = + getDecommissioningNodesDetails(decommissioningNodes, jsonNode, numDecomNodes); + try { + responseMap.put("DatanodesDecommissionInfo", dnDecommissionInfo); + builder.entity(responseMap); + return builder.build(); + } catch (Exception exception) { + LOG.error("Unexpected Error: {}", exception); + throw new WebApplicationException(exception, Response.Status.INTERNAL_SERVER_ERROR); + } + } + + private List> getDecommissioningNodesDetails(List decommissioningNodes, + JsonNode jsonNode, + int numDecomNodes) throws IOException { + List> decommissioningNodesDetails = new ArrayList<>(); + + for (HddsProtos.Node node : decommissioningNodes) { + DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( + node.getNodeID()); + Map datanodeMap = new LinkedHashMap<>(); + datanodeMap.put("datanodeDetails", datanode); + datanodeMap.put("metrics", getCounts(datanode, jsonNode, numDecomNodes)); + datanodeMap.put("containers", getContainers(datanode)); + decommissioningNodesDetails.add(datanodeMap); + } + return decommissioningNodesDetails; + } + + private Map getCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) { + Map countsMap = new LinkedHashMap<>(); + String errMsg = getErrorMessage() + datanode.getHostName(); + try { + countsMap = DecommissionUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg); + if (countsMap != null) { + return countsMap; + } + LOG.error(errMsg); + } catch (IOException e) { + LOG.error(errMsg + ": {} ", e); + } + return countsMap; + } + + private Map getContainers(DatanodeDetails datanode) + throws IOException { + Map> containers = scmClient.getContainersOnDecomNode(datanode); + return containers.entrySet().stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue().stream(). + map(ContainerID::toString). 
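// Illustrative aside (not part of the patch): the statement being assembled here turns
// the Map<String, List<ContainerID>> returned by scmClient.getContainersOnDecomNode()
// into a Map<String, List<String>> so it serializes cleanly. The same transformation as
// a generic helper; the name stringifyValues is hypothetical, and it relies on the
// java.util.List, java.util.Map and java.util.stream.Collectors imports already present.
static <T> Map<String, List<String>> stringifyValues(Map<String, List<T>> byState) {
  return byState.entrySet().stream()
      .collect(Collectors.toMap(
          Map.Entry::getKey,
          e -> e.getValue().stream()
              .map(Object::toString)
              .collect(Collectors.toList())));
}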
+ collect(Collectors.toList()))); + } + + public String getErrorMessage() { + return errorMessage; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 84f55749a68f..baa9c522be10 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -58,6 +58,7 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; @@ -652,6 +653,36 @@ public Response getDeletedDirInfo( return Response.ok(deletedDirInsightInfo).build(); } + /** + * Retrieves the summary of deleted directories. + * + * This method calculates and returns a summary of deleted directories. + * @return The HTTP response body includes a map with the following entries: + * - "totalDeletedDirectories": the total number of deleted directories + * + * Example response: + * { + * "totalDeletedDirectories": 8, + * } + */ + @GET + @Path("/deletePending/dirs/summary") + public Response getDeletedDirectorySummary() { + Map dirSummary = new HashMap<>(); + // Create a keys summary for deleted directories + createSummaryForDeletedDirectories(dirSummary); + return Response.ok(dirSummary).build(); + } + + private void createSummaryForDeletedDirectories( + Map dirSummary) { + // Fetch the necessary metrics for deleted directories. + Long deletedDirCount = getValueFromId(globalStatsDao.findById( + OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE))); + // Calculate the total number of deleted directories + dirSummary.put("totalDeletedDirectories", deletedDirCount); + } + private void updateReplicatedAndUnReplicatedTotal( KeyInsightInfoResponse deletedKeyAndDirInsightInfo, RepeatedOmKeyInfo repeatedOmKeyInfo) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java index 7ad961195ee7..00cd9617b5d3 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java @@ -36,6 +36,9 @@ import java.util.List; import java.util.Set; +import static org.apache.hadoop.ozone.recon.ReconConstants.DISK_USAGE_TOP_RECORDS_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconUtils.sortDiskUsageDescendingWithLimit; + /** * Class for handling bucket entity type. 
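The new /deletePending/dirs/summary handler deliberately returns a one-entry map. A small sketch of the payload it builds, using only JDK and JAX-RS types already imported in this endpoint; the helper class is illustrative and the count value is the one from the javadoc example, not real data:

import java.util.HashMap;
import java.util.Map;
import javax.ws.rs.core.Response;

/** Illustrative shape of the deleted-directory summary payload. */
final class DeletedDirSummaryExample {
  private DeletedDirSummaryExample() { }

  static Response buildSummary(long totalDeletedDirectories) {
    Map<String, Object> dirSummary = new HashMap<>();
    // Single metric: the deletedDirectoryTable row count that the real endpoint
    // fetches through globalStatsDao and getTableCountKeyFromTable().
    dirSummary.put("totalDeletedDirectories", totalDeletedDirectories);
    return Response.ok(dirSummary).build();   // e.g. {"totalDeletedDirectories": 8}
  }
}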
*/ @@ -87,7 +90,7 @@ private BucketObjectDBInfo getBucketObjDbInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean sortSubpaths) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -142,7 +145,15 @@ public DUResponse getDuResponse( } duResponse.setCount(dirDUData.size()); duResponse.setSize(bucketDataSize); + + if (sortSubpaths) { + // Parallel sort directory/files DU data in descending order of size and returns the top N elements. + dirDUData = sortDiskUsageDescendingWithLimit(dirDUData, + DISK_USAGE_TOP_RECORDS_LIMIT); + } + duResponse.setDuData(dirDUData); + return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 09cbf4fe4e40..266caaa2d8e2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.ozone.recon.api.handlers; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -163,6 +165,8 @@ public static BucketHandler getBucketHandler( ReconOMMetadataManager omMetadataManager, OzoneStorageContainerManager reconSCM, OmBucketInfo bucketInfo) throws IOException { + // Check if enableFileSystemPaths flag is set to true. + boolean enableFileSystemPaths = isEnableFileSystemPaths(omMetadataManager); // If bucketInfo is null then entity type is UNKNOWN if (Objects.isNull(bucketInfo)) { @@ -172,15 +176,20 @@ public static BucketHandler getBucketHandler( .equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { return new FSOBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); - } else if (bucketInfo.getBucketLayout() - .equals(BucketLayout.LEGACY)) { - return new LegacyBucketHandler(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketInfo); + } else if (bucketInfo.getBucketLayout().equals(BucketLayout.LEGACY)) { + // Choose handler based on enableFileSystemPaths flag for legacy layout. + // If enableFileSystemPaths is false, then the legacy bucket is treated + // as an OBS bucket. + if (enableFileSystemPaths) { + return new LegacyBucketHandler(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, bucketInfo); + } else { + return new OBSBucketHandler(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, bucketInfo); + } } else if (bucketInfo.getBucketLayout() .equals(BucketLayout.OBJECT_STORE)) { - // TODO: HDDS-7810 Write a handler for object store bucket - // We can use LegacyBucketHandler for OBS bucket for now. 
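sortDiskUsageDescendingWithLimit (imported from ReconUtils above) caps the subpaths returned by the du endpoints at DISK_USAGE_TOP_RECORDS_LIMIT, largest first. A rough sketch of that behaviour under the assumption that it is a parallel sort-and-truncate; this is not the ReconUtils implementation itself:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

/** Sketch of a "top N by size, descending" helper; names are illustrative. */
final class TopNBySize {
  private TopNBySize() { }

  static <T> List<T> topNDescending(List<T> duData, int limit, Comparator<T> bySize) {
    return duData.parallelStream()
        .sorted(bySize.reversed())   // largest sizes first
        .limit(limit)                // keep only the top N records
        .collect(Collectors.toList());
  }
}

With DUResponse.DiskUsage this would be invoked along the lines of topNDescending(dirDUData, DISK_USAGE_TOP_RECORDS_LIMIT, Comparator.comparingLong(DUResponse.DiskUsage::getSize)), assuming the usual getSize() accessor that matches the setSize() calls elsewhere in this patch.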
- return new LegacyBucketHandler(reconNamespaceSummaryManager, + return new OBSBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); } else { LOG.error("Unsupported bucket layout: " + @@ -190,6 +199,22 @@ public static BucketHandler getBucketHandler( } } + /** + * Determines whether FileSystemPaths are enabled for Legacy Buckets + * based on the Ozone configuration. + * + * @param ReconOMMetadataManager Instance + * @return True if FileSystemPaths are enabled, false otherwise. + */ + private static boolean isEnableFileSystemPaths(ReconOMMetadataManager omMetadataManager) { + OzoneConfiguration configuration = omMetadataManager.getOzoneConfiguration(); + if (configuration == null) { + configuration = new OzoneConfiguration(); + } + return configuration.getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); + } + public static BucketHandler getBucketHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java index fc7022e2dab2..b535943081bd 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java @@ -39,6 +39,9 @@ import java.util.List; import java.util.Set; +import static org.apache.hadoop.ozone.recon.ReconConstants.DISK_USAGE_TOP_RECORDS_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconUtils.sortDiskUsageDescendingWithLimit; + /** * Class for handling directory entity type. */ @@ -80,7 +83,7 @@ private ObjectDBInfo getDirectoryObjDbInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean sortSubPaths) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -154,8 +157,14 @@ public DUResponse getDuResponse( } duResponse.setCount(subdirDUData.size()); duResponse.setSize(dirDataSize); - duResponse.setDuData(subdirDUData); + if (sortSubPaths) { + // Parallel sort subdirDUData in descending order of size and returns the top N elements. 
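Whether a LEGACY bucket is walked like a file system or treated as a flat OBS namespace now hinges on one OM setting. A minimal sketch of reading that flag, mirroring isEnableFileSystemPaths() above; the wrapper class name is illustrative:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;

/** Illustrative read of the flag that drives Legacy-vs-OBS handler selection. */
final class FileSystemPathsFlag {
  private FileSystemPathsFlag() { }

  static boolean isEnabled(OzoneConfiguration conf) {
    // true  -> LegacyBucketHandler (directory-aware namespace summaries)
    // false -> OBSBucketHandler    (flat object-store namespace summaries)
    return conf.getBoolean(
        OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
        OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
  }
}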
+ subdirDUData = sortDiskUsageDescendingWithLimit(subdirDUData, + DISK_USAGE_TOP_RECORDS_LIMIT); + } + + duResponse.setDuData(subdirDUData); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index d12c7b6545ac..f2bcb58d3565 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -60,16 +61,25 @@ public EntityHandler( this.omMetadataManager = omMetadataManager; this.reconSCM = reconSCM; this.bucketHandler = bucketHandler; - normalizedPath = normalizePath(path); - names = parseRequestPath(normalizedPath); + // Defaulting to FILE_SYSTEM_OPTIMIZED if bucketHandler is null + BucketLayout layout = + (bucketHandler != null) ? bucketHandler.getBucketLayout() : + BucketLayout.FILE_SYSTEM_OPTIMIZED; + + // Normalize the path based on the determined layout + normalizedPath = normalizePath(path, layout); + + // Choose the parsing method based on the bucket layout + names = (layout == BucketLayout.OBJECT_STORE) ? + parseObjectStorePath(normalizedPath) : parseRequestPath(normalizedPath); } public abstract NamespaceSummaryResponse getSummaryResponse() throws IOException; public abstract DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean sort) throws IOException; public abstract QuotaUsageResponse getQuotaResponse() @@ -118,7 +128,8 @@ public static EntityHandler getEntityHandler( String path) throws IOException { BucketHandler bucketHandler; - String normalizedPath = normalizePath(path); + String normalizedPath = + normalizePath(path, BucketLayout.FILE_SYSTEM_OPTIMIZED); String[] names = parseRequestPath(normalizedPath); if (path.equals(OM_KEY_PREFIX)) { return EntityType.ROOT.create(reconNamespaceSummaryManager, @@ -156,23 +167,36 @@ public static EntityHandler getEntityHandler( String volName = names[0]; String bucketName = names[1]; - String keyName = BucketHandler.getKeyName(names); - + // Assuming getBucketHandler already validates volume and bucket existence bucketHandler = BucketHandler.getBucketHandler( - reconNamespaceSummaryManager, - omMetadataManager, reconSCM, - volName, bucketName); + reconNamespaceSummaryManager, omMetadataManager, reconSCM, volName, + bucketName); - // check if either volume or bucket doesn't exist - if (bucketHandler == null - || !omMetadataManager.volumeExists(volName) - || !bucketHandler.bucketExists(volName, bucketName)) { + if (bucketHandler == null) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); + } + + // Directly handle path normalization and parsing based on the layout + if (bucketHandler.getBucketLayout() == BucketLayout.OBJECT_STORE) { + String[] parsedObjectLayoutPath = parseObjectStorePath( + normalizePath(path, bucketHandler.getBucketLayout())); + if (parsedObjectLayoutPath == null) { + return 
EntityType.UNKNOWN.create(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, null, path); + } + // Use the key part directly from the parsed path + return bucketHandler.determineKeyPath(parsedObjectLayoutPath[2]) + .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, + bucketHandler, path); + } else { + // Use the existing names array for non-OBJECT_STORE layouts to derive + // the keyName + String keyName = BucketHandler.getKeyName(names); + return bucketHandler.determineKeyPath(keyName) + .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, + bucketHandler, path); } - return bucketHandler.determineKeyPath(keyName) - .create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketHandler, path); } } @@ -256,7 +280,52 @@ public static String[] parseRequestPath(String path) { return names; } - private static String normalizePath(String path) { + /** + * Splits an object store path into volume, bucket, and key name components. + * + * This method parses a path of the format "/volumeName/bucketName/keyName", + * including paths with additional '/' characters within the key name. It's + * designed for object store paths where the first three '/' characters + * separate the root, volume and bucket names from the key name. + * + * @param path The object store path to parse, starting with a slash. + * @return A String array with three elements: volume name, bucket name, and + * key name, or {null} if the path format is invalid. + */ + public static String[] parseObjectStorePath(String path) { + // Removing the leading slash for correct splitting + path = path.substring(1); + + // Splitting the modified path by "/", limiting to 3 parts + String[] parts = path.split("/", 3); + + // Checking if we correctly obtained 3 parts after removing the leading slash + if (parts.length <= 3) { + return parts; + } else { + return null; + } + } + + /** + * Normalizes a given path based on the specified bucket layout. + * + * This method adjusts the path according to the bucket layout. + * For {OBJECT_STORE Layout}, it normalizes the path up to the bucket level + * using OmUtils.normalizePathUptoBucket. For other layouts, it + * normalizes the entire path, including the key, using + * OmUtils.normalizeKey, and does not preserve any trailing slashes. + * The normalized path will always be prefixed with OM_KEY_PREFIX to ensure it + * is consistent with the expected format for object storage paths in Ozone. + * + * @param path + * @param bucketLayout + * @return A normalized path + */ + private static String normalizePath(String path, BucketLayout bucketLayout) { + if (bucketLayout == BucketLayout.OBJECT_STORE) { + return OM_KEY_PREFIX + OmUtils.normalizePathUptoBucket(path); + } return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java index 26cda6442d4e..8a1c5babe75e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java @@ -42,7 +42,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling FSO buckets. + * Class for handling FSO buckets NameSpaceSummaries. 
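Because parseObjectStorePath() splits with a limit of three, everything after the bucket name stays inside the key component, slashes included. A short worked example of the split it performs; the path and names are illustrative:

/** Worked example of the three-way split done by parseObjectStorePath(). */
final class ObjectStorePathSplit {
  private ObjectStorePathSplit() { }

  public static void main(String[] args) {
    String path = "/vol1/bucket1/dir1/dir2/key1";
    // Drop the leading slash, then split into at most three parts.
    String[] parts = path.substring(1).split("/", 3);
    System.out.println(parts[0]);   // vol1
    System.out.println(parts[1]);   // bucket1
    System.out.println(parts[2]);   // dir1/dir2/key1  (slashes preserved in the key)
  }
}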
*/ public class FSOBucketHandler extends BucketHandler { private static final Logger LOG = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java index a687bf3d0bdd..8ea26fd2846e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java @@ -71,7 +71,7 @@ private ObjectDBInfo getKeyDbObjectInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean sort) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 3dd1ddbdabb9..09f1c5bc7454 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -41,7 +41,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling Legacy buckets. + * Class for handling Legacy buckets NameSpaceSummaries. */ public class LegacyBucketHandler extends BucketHandler { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java new file mode 100644 index 000000000000..024eec989a10 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -0,0 +1,268 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.recon.api.handlers; + + +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; + +/** + * Class for handling OBS buckets NameSpaceSummaries. + */ +public class OBSBucketHandler extends BucketHandler { + + private final String vol; + private final String bucket; + private final OmBucketInfo omBucketInfo; + + public OBSBucketHandler( + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager, + OzoneStorageContainerManager reconSCM, + OmBucketInfo bucketInfo) { + super(reconNamespaceSummaryManager, omMetadataManager, + reconSCM); + this.omBucketInfo = bucketInfo; + this.vol = omBucketInfo.getVolumeName(); + this.bucket = omBucketInfo.getBucketName(); + } + + /** + * Helper function to check if a path is a key, or invalid. + * + * @param keyName key name + * @return KEY, or UNKNOWN + * @throws IOException + */ + @Override + public EntityType determineKeyPath(String keyName) throws IOException { + String key = OM_KEY_PREFIX + vol + + OM_KEY_PREFIX + bucket + + OM_KEY_PREFIX + keyName; + + Table keyTable = getKeyTable(); + + try ( + TableIterator> + iterator = keyTable.iterator()) { + iterator.seek(key); + if (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + if (dbKey.equals(key)) { + return EntityType.KEY; + } + } + } + return EntityType.UNKNOWN; + } + + /** + * This method handles disk usage of direct keys. + * + * @param parentId The identifier for the parent bucket. + * @param withReplica if withReplica is enabled, set sizeWithReplica + * for each direct key's DU + * @param listFile if listFile is enabled, append key DU as a children + * keys + * @param duData the current DU data + * @param normalizedPath the normalized path request + * @return the total DU of all direct keys + * @throws IOException IOE + */ + @Override + public long handleDirectKeys(long parentId, boolean withReplica, + boolean listFile, + List duData, + String normalizedPath) throws IOException { + + NSSummary nsSummary = getReconNamespaceSummaryManager() + .getNSSummary(parentId); + // Handle the case of an empty bucket. + if (nsSummary == null) { + return 0; + } + + Table keyTable = getKeyTable(); + long keyDataSizeWithReplica = 0L; + + try ( + TableIterator> + iterator = keyTable.iterator()) { + + String seekPrefix = OM_KEY_PREFIX + + vol + + OM_KEY_PREFIX + + bucket + + OM_KEY_PREFIX; + + iterator.seek(seekPrefix); + + while (iterator.hasNext()) { + // KeyName : OmKeyInfo-Object + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + + // Exit loop if the key doesn't match the seekPrefix. 
+ if (!dbKey.startsWith(seekPrefix)) { + break; + } + + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + String objectName = keyInfo.getKeyName(); + diskUsage.setSubpath(objectName); + diskUsage.setKey(true); + diskUsage.setSize(keyInfo.getDataSize()); + + if (withReplica) { + long keyDU = keyInfo.getReplicatedSize(); + keyDataSizeWithReplica += keyDU; + diskUsage.setSizeWithReplica(keyDU); + } + // List all the keys for the OBS bucket if requested. + if (listFile) { + duData.add(diskUsage); + } + } + } + } + + return keyDataSizeWithReplica; + } + + /** + * Calculates the total disk usage (DU) for an Object Store Bucket (OBS) by + * summing the sizes of all keys contained within the bucket. + * Since OBS buckets operate on a flat hierarchy, this method iterates through + * all the keys in the bucket without the need to traverse directories. + * + * @param parentId The identifier for the parent bucket. + * @return The total disk usage of all keys within the specified OBS bucket. + * @throws IOException + */ + @Override + public long calculateDUUnderObject(long parentId) throws IOException { + // Initialize the total disk usage variable. + long totalDU = 0L; + + // Access the key table for the bucket. + Table keyTable = getKeyTable(); + + try ( + TableIterator> + iterator = keyTable.iterator()) { + // Construct the seek prefix to filter keys under this bucket. + String seekPrefix = + OM_KEY_PREFIX + vol + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX; + iterator.seek(seekPrefix); + + // Iterate over keys in the bucket. + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String keyName = kv.getKey(); + + // Break the loop if the current key does not start with the seekPrefix. + if (!keyName.startsWith(seekPrefix)) { + break; + } + + // Sum the size of each key to the total disk usage. + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + totalDU += keyInfo.getDataSize(); + } + } + } + + // Return the total disk usage of all keys in the bucket. + return totalDU; + } + + /** + * Object stores do not support directories. + * + * @throws UnsupportedOperationException + */ + @Override + public long getDirObjectId(String[] names) + throws UnsupportedOperationException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + /** + * Object stores do not support directories. + * + * @throws UnsupportedOperationException + */ + @Override + public long getDirObjectId(String[] names, int cutoff) + throws UnsupportedOperationException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + /** + * Returns the keyInfo object from the KEY table. + * @return OmKeyInfo + */ + @Override + public OmKeyInfo getKeyInfo(String[] names) throws IOException { + String ozoneKey = OM_KEY_PREFIX; + ozoneKey += String.join(OM_KEY_PREFIX, names); + + return getKeyTable().getSkipCache(ozoneKey); + } + + /** + * Object stores do not support directories. 
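handleDirectKeys() and calculateDUUnderObject() rely on the same pattern: seek to the "/volume/bucket/" prefix in the sorted key table and stop as soon as a key no longer starts with it. The pattern on a plain sorted map, to make the flat-namespace accounting concrete; the data and class name are illustrative:

import java.util.Map;
import java.util.TreeMap;

/** Seek-and-break prefix scan, as used over the OBS bucket's key table. */
final class PrefixScanExample {
  private PrefixScanExample() { }

  public static void main(String[] args) {
    TreeMap<String, Long> keyTable = new TreeMap<>();
    keyTable.put("/vol1/bucket1/a.txt", 10L);
    keyTable.put("/vol1/bucket1/b.txt", 20L);
    keyTable.put("/vol1/bucket2/c.txt", 30L);

    String seekPrefix = "/vol1/bucket1/";
    long totalDU = 0L;
    // tailMap() plays the role of iterator.seek(seekPrefix).
    for (Map.Entry<String, Long> entry : keyTable.tailMap(seekPrefix).entrySet()) {
      if (!entry.getKey().startsWith(seekPrefix)) {
        break;                       // left this bucket's key range
      }
      totalDU += entry.getValue();   // sum of un-replicated key sizes
    }
    System.out.println(totalDU);     // 30
  }
}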
+ * + * @throws UnsupportedOperationException + */ + @Override + public OmDirectoryInfo getDirInfo(String[] names) throws IOException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + public Table getKeyTable() { + return getOmMetadataManager().getKeyTable(getBucketLayout()); + } + + public BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } + +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java index fd0e58f191af..b67703257ac1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java @@ -39,6 +39,9 @@ import java.util.ArrayList; import java.util.List; +import static org.apache.hadoop.ozone.recon.ReconConstants.DISK_USAGE_TOP_RECORDS_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconUtils.sortDiskUsageDescendingWithLimit; + /** * Class for handling root entity type. */ @@ -88,7 +91,7 @@ private ObjectDBInfo getPrefixObjDbInfo() @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean sortSubPaths) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -137,6 +140,13 @@ public DUResponse getDuResponse( duResponse.setSizeWithReplica(totalDataSizeWithReplica); } duResponse.setSize(totalDataSize); + + if (sortSubPaths) { + // Parallel sort volumeDuData in descending order of size and returns the top N elements. + volumeDuData = sortDiskUsageDescendingWithLimit(volumeDuData, + DISK_USAGE_TOP_RECORDS_LIMIT); + } + duResponse.setDuData(volumeDuData); return duResponse; @@ -148,7 +158,8 @@ public QuotaUsageResponse getQuotaResponse() QuotaUsageResponse quotaUsageResponse = new QuotaUsageResponse(); SCMNodeStat stats = getReconSCM().getScmNodeManager().getStats(); long quotaInBytes = stats.getCapacity().get(); - long quotaUsedInBytes = getDuResponse(true, true).getSizeWithReplica(); + long quotaUsedInBytes = + getDuResponse(true, true, false).getSizeWithReplica(); quotaUsageResponse.setQuota(quotaInBytes); quotaUsageResponse.setQuotaUsed(quotaUsedInBytes); return quotaUsageResponse; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java index b5a5bd9a0be9..ab61ec38e8bf 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java @@ -51,7 +51,7 @@ public NamespaceSummaryResponse getSummaryResponse() @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean sort) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setStatus(ResponseStatus.PATH_NOT_FOUND); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java index fae508a99c9d..2ca9c352ce77 100644 --- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java @@ -36,6 +36,10 @@ import java.util.ArrayList; import java.util.List; + +import static org.apache.hadoop.ozone.recon.ReconConstants.DISK_USAGE_TOP_RECORDS_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconUtils.sortDiskUsageDescendingWithLimit; + /** * Class for handling volume entity type. */ @@ -92,7 +96,7 @@ private VolumeObjectDBInfo getVolumeObjDbInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean sortSubPaths) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -131,6 +135,13 @@ public DUResponse getDuResponse( duResponse.setSizeWithReplica(volDataSizeWithReplica); } duResponse.setSize(volDataSize); + + if (sortSubPaths) { + // Parallel sort bucketDuData in descending order of size and returns the top N elements. + bucketDuData = sortDiskUsageDescendingWithLimit(bucketDuData, + DISK_USAGE_TOP_RECORDS_LIMIT); + } + duResponse.setDuData(bucketDuData); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java index d8a0cf5334e2..fae47b3b368c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java @@ -130,9 +130,7 @@ public static AclMetadata fromOzoneAcl(OzoneAcl ozoneAcl) { return builder.withType(ozoneAcl.getType().toString().toUpperCase()) .withName(ozoneAcl.getName()) .withScope(ozoneAcl.getAclScope().toString().toUpperCase()) - .withAclList(ozoneAcl.getAclList().stream().map(Enum::toString) - .map(String::toUpperCase) - .collect(Collectors.toList())) + .withAclList(ozoneAcl.getAclStringList()) .build(); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 4927c4a1e86a..06c20a963a2a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.recon.api.types; +import com.fasterxml.jackson.annotation.JsonInclude; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -33,51 +34,66 @@ public final class DatanodeMetadata { @XmlElement(name = "uuid") + @JsonInclude(JsonInclude.Include.NON_NULL) private String uuid; @XmlElement(name = "hostname") + @JsonInclude(JsonInclude.Include.NON_NULL) private String hostname; @XmlElement(name = "state") + @JsonInclude(JsonInclude.Include.NON_NULL) private NodeState state; @XmlElement(name = "opState") + @JsonInclude(JsonInclude.Include.NON_NULL) private NodeOperationalState opState; @XmlElement(name = "lastHeartbeat") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long lastHeartbeat; @XmlElement(name = "storageReport") + @JsonInclude(JsonInclude.Include.NON_NULL) private 
DatanodeStorageReport datanodeStorageReport; @XmlElement(name = "pipelines") private List pipelines; @XmlElement(name = "containers") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int containers; @XmlElement(name = "openContainers") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int openContainers; @XmlElement(name = "leaderCount") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int leaderCount; @XmlElement(name = "version") + @JsonInclude(JsonInclude.Include.NON_NULL) private String version; @XmlElement(name = "setupTime") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long setupTime; @XmlElement(name = "revision") + @JsonInclude(JsonInclude.Include.NON_NULL) private String revision; @XmlElement(name = "buildDate") + @JsonInclude(JsonInclude.Include.NON_NULL) private String buildDate; @XmlElement(name = "layoutVersion") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int layoutVersion; @XmlElement(name = "networkLocation") + @JsonInclude(JsonInclude.Include.NON_NULL) private String networkLocation; private DatanodeMetadata(Builder builder) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java new file mode 100644 index 000000000000..e2312e2fdb37 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Class that represents the datanode metrics captured during decommissioning. + */ +public class DatanodeMetrics { + /** + * Start time of decommission of datanode. + */ + @JsonProperty("decommissionStartTime") + private String decommissionStartTime; + + /** + * Number of pipelines in unclosed status. + */ + @JsonProperty("numOfUnclosedPipelines") + private int numOfUnclosedPipelines; + + /** + * Number of under replicated containers. + */ + @JsonProperty("numOfUnderReplicatedContainers") + private double numOfUnderReplicatedContainers; + + /** + * Number of containers still not closed. 
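The @JsonInclude annotations added to DatanodeMetadata affect only serialization: NON_NULL drops fields that are null and NON_DEFAULT drops fields still at their Java default, which keeps the slimmed-down metadata objects in the remove-datanodes response free of empty noise. A self-contained sketch of the effect; the Sample POJO is illustrative:

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;

/** Demonstrates NON_NULL / NON_DEFAULT suppression on a toy POJO. */
public class JsonIncludeDemo {
  static class Sample {
    @JsonInclude(JsonInclude.Include.NON_NULL)
    public String hostname;        // omitted while null
    @JsonInclude(JsonInclude.Include.NON_DEFAULT)
    public int containers;         // omitted while 0
    public String uuid = "dn-1";
  }

  public static void main(String[] args) throws Exception {
    System.out.println(new ObjectMapper().writeValueAsString(new Sample()));
    // {"uuid":"dn-1"} -- hostname and containers are suppressed
  }
}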
+ */ + @JsonProperty("numOfUnclosedContainers") + private double numOfUnclosedContainers; + + public String getDecommissionStartTime() { + return decommissionStartTime; + } + + public void setDecommissionStartTime(String decommissionStartTime) { + this.decommissionStartTime = decommissionStartTime; + } + + public int getNumOfUnclosedPipelines() { + return numOfUnclosedPipelines; + } + + public void setNumOfUnclosedPipelines(int numOfUnclosedPipelines) { + this.numOfUnclosedPipelines = numOfUnclosedPipelines; + } + + public double getNumOfUnderReplicatedContainers() { + return numOfUnderReplicatedContainers; + } + + public void setNumOfUnderReplicatedContainers(double numOfUnderReplicatedContainers) { + this.numOfUnderReplicatedContainers = numOfUnderReplicatedContainers; + } + + public double getNumOfUnclosedContainers() { + return numOfUnclosedContainers; + } + + public void setNumOfUnclosedContainers(double numOfUnclosedContainers) { + this.numOfUnclosedContainers = numOfUnclosedContainers; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodesResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodesResponse.java index 79b5a2e56f94..3644255a3f84 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodesResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodesResponse.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.ozone.recon.api.types; +import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.ArrayList; import java.util.Collection; +import java.util.Map; /** * Class that represents the API Response structure of Datanodes. @@ -35,8 +37,16 @@ public class DatanodesResponse { * An array of datanodes. */ @JsonProperty("datanodes") + @JsonInclude(JsonInclude.Include.NON_EMPTY) private Collection datanodes; + /** + * An API response msg. + */ + @JsonProperty("errors") + @JsonInclude(JsonInclude.Include.NON_EMPTY) + private Map failedNodeErrorResponseMap; + public DatanodesResponse() { this(0, new ArrayList<>()); @@ -55,4 +65,12 @@ public long getTotalCount() { public Collection getDatanodes() { return datanodes; } + + public Map getFailedNodeErrorResponseMap() { + return failedNodeErrorResponseMap; + } + + public void setFailedNodeErrorResponseMap(Map failedNodeErrorResponseMap) { + this.failedNodeErrorResponseMap = failedNodeErrorResponseMap; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java new file mode 100644 index 000000000000..aab2a2789bbe --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.ContainerID; + +import java.util.List; +import java.util.Map; + +/** + * Class that represents the API Response of decommissioning status info of datanode. + */ +public class DecommissionStatusInfoResponse { + /** + * Metadata of a datanode when decommissioning of datanode is in progress. + */ + @JsonProperty("datanodeDetails") + private DatanodeDetails dataNodeDetails; + + /** + * Metrics of datanode when decommissioning of datanode is in progress. + */ + @JsonProperty("metrics") + private DatanodeMetrics datanodeMetrics; + + /** + * containers info of a datanode when decommissioning of datanode is in progress. + */ + @JsonProperty("containers") + private Map> containers; + + public DatanodeDetails getDataNodeDetails() { + return dataNodeDetails; + } + + public void setDataNodeDetails(DatanodeDetails dataNodeDetails) { + this.dataNodeDetails = dataNodeDetails; + } + + public DatanodeMetrics getDatanodeMetrics() { + return datanodeMetrics; + } + + public void setDatanodeMetrics(DatanodeMetrics datanodeMetrics) { + this.datanodeMetrics = datanodeMetrics; + } + + public Map> getContainers() { + return containers; + } + + public void setContainers( + Map> containers) { + this.containers = containers; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java index c48e21d90f90..5094f47c24c2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java @@ -45,6 +45,9 @@ public class KeyMetadata { @XmlElement(name = "Key") private String key; + @XmlElement(name = "CompletePath") + private String completePath; + @XmlElement(name = "DataSize") private long dataSize; @@ -126,6 +129,14 @@ public void setBlockIds(Map> blockIds) { this.blockIds = blockIds; } + public String getCompletePath() { + return completePath; + } + + public void setCompletePath(String completePath) { + this.completePath = completePath; + } + /** * Class to hold ContainerID and BlockID. 
*/ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java index 5b05975623c1..c09d28718e8b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java @@ -36,9 +36,13 @@ public class KeysResponse { @JsonProperty("keys") private Collection keys; - public KeysResponse(long totalCount, Collection keys) { + @JsonProperty("lastKey") + private String lastKey; + + public KeysResponse(long totalCount, Collection keys, String lastKey) { this.totalCount = totalCount; this.keys = keys; + this.lastKey = lastKey; } public long getTotalCount() { @@ -48,4 +52,7 @@ public long getTotalCount() { public Collection getKeys() { return keys; } + public String getLastKey() { + return lastKey; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java index c0f93aebe97d..0f774f01bf48 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java @@ -36,22 +36,25 @@ public class NSSummary { private int[] fileSizeBucket; private Set childDir; private String dirName; + private long parentId = 0; public NSSummary() { this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], - new HashSet<>(), ""); + new HashSet<>(), "", 0); } public NSSummary(int numOfFiles, long sizeOfFiles, int[] bucket, Set childDir, - String dirName) { + String dirName, + long parentId) { this.numOfFiles = numOfFiles; this.sizeOfFiles = sizeOfFiles; setFileSizeBucket(bucket); this.childDir = childDir; this.dirName = dirName; + this.parentId = parentId; } public int getNumOfFiles() { @@ -107,4 +110,12 @@ public void removeChildDir(long childId) { this.childDir.remove(childId); } } + + public long getParentId() { + return parentId; + } + + public void setParentId(long parentId) { + this.parentId = parentId; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/RemoveDataNodesResponseWrapper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/RemoveDataNodesResponseWrapper.java new file mode 100644 index 000000000000..abbcc89ca2d0 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/RemoveDataNodesResponseWrapper.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonInclude; + +import javax.xml.bind.annotation.XmlElement; +import java.util.HashMap; +import java.util.Map; + +/** + * Class that represents the API Response structure of Datanodes. + */ +public class RemoveDataNodesResponseWrapper { + + @XmlElement(name = "datanodesResponseMap") + @JsonInclude(JsonInclude.Include.NON_NULL) + private Map datanodesResponseMap = new HashMap<>(); + + public Map getDatanodesResponseMap() { + return datanodesResponseMap; + } + + public void setDatanodesResponseMap( + Map datanodesResponseMap) { + this.datanodesResponseMap = datanodesResponseMap; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java index eaf08d9ca83e..ba03ec61f145 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java @@ -50,6 +50,12 @@ public class UnhealthyContainersResponse { @JsonProperty("misReplicatedCount") private long misReplicatedCount = 0; + /** + * Total count of containers with negative size. + */ + @JsonProperty("negativeSizeCount") + private long negativeSizeCount = 0; + /** * A collection of unhealthy containers. */ @@ -77,6 +83,9 @@ public void setSummaryCount(String state, long count) { } else if (state.equals( UnHealthyContainerStates.MIS_REPLICATED.toString())) { this.misReplicatedCount = count; + } else if (state.equals( + UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { + this.negativeSizeCount = count; } } @@ -96,6 +105,10 @@ public long getMisReplicatedCount() { return misReplicatedCount; } + public long getNegativeSizeCount() { + return negativeSizeCount; + } + public Collection getContainers() { return containers; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index 09e0b2587934..f3b273451a2d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -65,9 +65,10 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException { int stringLen = dirName.getBytes(StandardCharsets.UTF_8).length; int numOfChildDirs = childDirs.size(); final int resSize = NUM_OF_INTS * Integer.BYTES - + (numOfChildDirs + 1) * Long.BYTES // 1 long field + list size + + (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size + Short.BYTES // 2 dummy shorts to track length - + stringLen; // directory name length + + stringLen // directory name length + + Long.BYTES; // Added space for parentId serialization ByteArrayOutputStream out = new ByteArrayOutputStream(resSize); out.write(integerCodec.toPersistedFormat(object.getNumOfFiles())); @@ -84,6 +85,8 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException { } out.write(integerCodec.toPersistedFormat(stringLen)); out.write(stringCodec.toPersistedFormat(dirName)); + out.write(longCodec.toPersistedFormat(object.getParentId())); + return out.toByteArray(); } @@ -117,6 +120,15 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { assert (bytesRead 
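Appending parentId at the end of the serialized NSSummary keeps old records readable: on read, a pre-upgrade record simply runs out of bytes and the codec falls back to -1, as the availability check just below does. The compatibility check in isolation; the wrapper and method names are illustrative:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

/** Illustrative version of the old-vs-new format check in fromPersistedFormat(). */
final class ParentIdCompat {
  private ParentIdCompat() { }

  static long readParentIdOrDefault(DataInputStream in) throws IOException {
    if (in.available() >= Long.BYTES) {
      return in.readLong();   // new format: parentId was appended on write
    }
    return -1L;               // old format: nothing left to read, use the sentinel
  }

  public static void main(String[] args) throws IOException {
    byte[] oldFormatTail = new byte[0];   // a record written before the upgrade
    System.out.println(readParentIdOrDefault(
        new DataInputStream(new ByteArrayInputStream(oldFormatTail))));   // -1
  }
}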
== strLen); String dirName = stringCodec.fromPersistedFormat(buffer); res.setDirName(dirName); + + // Check if there is enough data available to read the parentId + if (in.available() >= Long.BYTES) { + long parentId = in.readLong(); + res.setParentId(parentId); + } else { + // Set default parentId to -1 indicating it's from old format + res.setParentId(-1); + } return res; } @@ -128,6 +140,7 @@ public NSSummary copyObject(NSSummary object) { copy.setFileSizeBucket(object.getFileSizeBucket()); copy.setChildDir(object.getChildDir()); copy.setDirName(object.getDirName()); + copy.setParentId(object.getParentId()); return copy; } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 577fb7d2bcc1..2284fe84e6d4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.TOTAL_KEYS; import static org.apache.hadoop.ozone.recon.ReconConstants.TOTAL_USED_BYTES; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING; /** @@ -217,6 +218,8 @@ private void initializeUnhealthyContainerStateStatsMap( UnHealthyContainerStates.OVER_REPLICATED, new HashMap<>()); unhealthyContainerStateStatsMap.put( UnHealthyContainerStates.MIS_REPLICATED, new HashMap<>()); + unhealthyContainerStateStatsMap.put( + UnHealthyContainerStates.NEGATIVE_SIZE, new HashMap<>()); } private ContainerHealthStatus setCurrentContainer(long recordId) @@ -293,6 +296,8 @@ private long processExistingDBRecords(long currentTime, rec.update(); } } else { + LOG.info("DELETED existing unhealthy container record...for Container: {}", + currentContainer.getContainerID()); rec.delete(); } } catch (ContainerNotFoundException cnf) { @@ -313,13 +318,21 @@ private long processExistingDBRecords(long currentTime, private void processContainer(ContainerInfo container, long currentTime, Map> - unhealthyContainerStateStatsMap) { + unhealthyContainerStateStatsMap) { try { Set containerReplicas = containerManager.getContainerReplicas(container.containerID()); ContainerHealthStatus h = new ContainerHealthStatus(container, containerReplicas, placementPolicy, reconContainerMetadataManager, conf); + + // Handle negative sized containers separately + if (h.getContainer().getUsedBytes() < 0) { + handleNegativeSizedContainers(h, currentTime, + unhealthyContainerStateStatsMap); + return; + } + if (h.isHealthilyReplicated() || h.isDeleted()) { return; } @@ -365,6 +378,32 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { return false; } + /** + * This method is used to handle containers with negative sizes. It logs an + * error message and inserts a record into the UNHEALTHY_CONTAINERS table. + * @param containerHealthStatus + * @param currentTime + * @param unhealthyContainerStateStatsMap + */ + private void handleNegativeSizedContainers( + ContainerHealthStatus containerHealthStatus, long currentTime, + Map> + unhealthyContainerStateStatsMap) { + ContainerInfo container = containerHealthStatus.getContainer(); + LOG.error( + "Container {} has negative size. 
Please visit Recon's unhealthy " + + "container endpoint for more details.", + container.getContainerID()); + UnhealthyContainers record = + ContainerHealthRecords.recordForState(containerHealthStatus, + UnHealthyContainerStates.NEGATIVE_SIZE, currentTime); + List records = Collections.singletonList(record); + populateContainerStats(containerHealthStatus, + UnHealthyContainerStates.NEGATIVE_SIZE, + unhealthyContainerStateStatsMap); + containerHealthSchemaManager.insertUnhealthyContainerRecords(records); + } + /** * Helper methods to generate and update the required database records for * unhealthy containers. @@ -394,7 +433,7 @@ public static boolean retainOrUpdateRecord( boolean returnValue = false; switch (UnHealthyContainerStates.valueOf(rec.getContainerState())) { case MISSING: - returnValue = container.isMissing(); + returnValue = container.isMissing() && !container.isEmpty(); break; case MIS_REPLICATED: returnValue = keepMisReplicatedRecord(container, rec); @@ -459,10 +498,10 @@ public static List generateUnhealthyRecords( "starting with **Container State Stats:**"); } records.add( - recordForState(container, UnHealthyContainerStates.EMPTY_MISSING, + recordForState(container, EMPTY_MISSING, time)); populateContainerStats(container, - UnHealthyContainerStates.EMPTY_MISSING, + EMPTY_MISSING, unhealthyContainerStateStatsMap); } // A container cannot have any other records if it is missing so return diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java index 2f3de1debcde..57f7686263fa 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java @@ -71,7 +71,7 @@ private long getEntitySize(String path) throws IOException { EntityHandler.getEntityHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, path); if (null != entityHandler) { - DUResponse duResponse = entityHandler.getDuResponse(false, false); + DUResponse duResponse = entityHandler.getDuResponse(false, false, false); if (null != duResponse && duResponse.getStatus() == ResponseStatus.OK) { return duResponse.getSize(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java index e6ad328ab98f..e1a3c97d2be2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java @@ -107,39 +107,39 @@ public void setAverageNumUpdatesInDeltaRequest(float avg) { averageNumUpdatesInDeltaRequest.set(avg); } - public MutableCounterLong getNumSnapshotRequests() { - return numSnapshotRequests; + public long getNumSnapshotRequests() { + return numSnapshotRequests.value(); } - public MutableCounterLong getNumSnapshotRequestsFailed() { - return numSnapshotRequestsFailed; + public long getNumSnapshotRequestsFailed() { + return numSnapshotRequestsFailed.value(); } - public MutableRate getSnapshotRequestLatency() { + MutableRate getSnapshotRequestLatency() { return snapshotRequestLatency; } - public MutableCounterLong getNumDeltaRequestsFailed() { - return numDeltaRequestsFailed; + public long getNumDeltaRequestsFailed() { + return numDeltaRequestsFailed.value(); } - 
public MutableCounterLong getNumUpdatesInDeltaTotal() { - return numUpdatesInDeltaTotal; + public long getNumUpdatesInDeltaTotal() { + return numUpdatesInDeltaTotal.value(); } - public MutableGaugeFloat getAverageNumUpdatesInDeltaRequest() { - return averageNumUpdatesInDeltaRequest; + public float getAverageNumUpdatesInDeltaRequest() { + return averageNumUpdatesInDeltaRequest.value(); } - public MutableCounterLong getNumNonZeroDeltaRequests() { - return numNonZeroDeltaRequests; + public long getNumNonZeroDeltaRequests() { + return numNonZeroDeltaRequests.value(); } public void setSequenceNumberLag(long lag) { sequenceNumberLag.set(lag); } - public MutableGaugeLong getSequenceNumberLag() { - return sequenceNumberLag; + public long getSequenceNumberLag() { + return sequenceNumberLag.value(); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java index 364aff103a51..0c13376fa526 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java @@ -34,6 +34,9 @@ import org.jooq.DSLContext; import org.jooq.Record; import org.jooq.SelectQuery; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.List; /** @@ -41,6 +44,8 @@ */ @Singleton public class ContainerHealthSchemaManager { + private static final Logger LOG = + LoggerFactory.getLogger(ContainerHealthSchemaManager.class); private final UnhealthyContainersDao unhealthyContainersDao; private final ContainerSchemaDefinition containerSchemaDefinition; @@ -113,6 +118,12 @@ public Cursor getAllUnhealthyRecordsCursor() { } public void insertUnhealthyContainerRecords(List recs) { + if (LOG.isDebugEnabled()) { + recs.forEach(rec -> { + LOG.debug("rec.getContainerId() : {}, rec.getContainerState(): {} ", rec.getContainerId(), + rec.getContainerState()); + }); + } unhealthyContainersDao.insert(recs); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java index 2040b7b343d9..1fc114eabd75 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -105,4 +106,11 @@ List listBucketsUnderVolume(String volumeName, */ List listBucketsUnderVolume( String volumeName) throws IOException; + + /** + * Return the OzoneConfiguration instance used by Recon. 
+ * @return + */ + OzoneConfiguration getOzoneConfiguration(); + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java index ad0526363df0..4b041f6511f6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java @@ -291,6 +291,11 @@ public List listBucketsUnderVolume(final String volumeName) Integer.MAX_VALUE); } + @Override + public OzoneConfiguration getOzoneConfiguration() { + return ozoneConfiguration; + } + private List listAllBuckets(final int maxNumberOfBuckets) throws IOException { List result = new ArrayList<>(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java index e43d5cce21c1..828942c8e5a5 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java @@ -85,7 +85,6 @@ public void onMessage(final DatanodeDetails datanodeDetails, } containerHealthTask.triggerContainerHealthCheck(); pipelineSyncTask.triggerPipelineSyncTask(); - containerSizeCountTask.process(containerManager.getContainers()); } catch (Exception ioEx) { LOG.error("Error trying to verify Node operational state from SCM.", ioEx); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index ab919b7d9719..65a9530c5cac 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -25,10 +25,12 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; @@ -38,11 +40,13 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopology.InvalidTopologyException; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; import 
org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.utils.db.Table; @@ -52,6 +56,7 @@ import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; +import org.apache.hadoop.ozone.recon.ReconContext; import org.apache.hadoop.util.Time; import com.google.common.collect.ImmutableSet; @@ -71,6 +76,7 @@ public class ReconNodeManager extends SCMNodeManager { .getLogger(ReconNodeManager.class); private Table nodeDB; + private ReconContext reconContext; private static final Set ALLOWED_COMMANDS = ImmutableSet.of(reregisterCommand); @@ -98,6 +104,13 @@ public ReconNodeManager(OzoneConfiguration conf, this.reconDatanodeOutdatedTime = reconStaleDatanodeMultiplier * HddsServerUtil.getReconHeartbeatInterval(conf); this.nodeDB = nodeDB; + } + + public ReconNodeManager(OzoneConfiguration conf, SCMStorageConfig scmStorageConfig, EventQueue eventQueue, + NetworkTopology clusterMap, Table table, + HDDSLayoutVersionManager scmLayoutVersionManager, ReconContext reconContext) { + this(conf, scmStorageConfig, eventQueue, clusterMap, table, scmLayoutVersionManager); + this.reconContext = reconContext; loadExistingNodes(); } @@ -276,8 +289,23 @@ public RegisteredCommand register( datanodeDetails.getUuid()); } } - return super.register(datanodeDetails, nodeReport, pipelineReportsProto, - layoutInfo); + try { + RegisteredCommand registeredCommand = super.register(datanodeDetails, nodeReport, pipelineReportsProto, + layoutInfo); + reconContext.updateHealthStatus(new AtomicBoolean(true)); + reconContext.getErrors().remove(ReconContext.ErrorCode.INVALID_NETWORK_TOPOLOGY); + return registeredCommand; + } catch (InvalidTopologyException invalidTopologyException) { + LOG.error("InvalidTopologyException error occurred : {}", invalidTopologyException.getMessage()); + reconContext.updateHealthStatus(new AtomicBoolean(false)); + reconContext.getErrors().add(ReconContext.ErrorCode.INVALID_NETWORK_TOPOLOGY); + return RegisteredCommand.newBuilder() + .setErrorCode( + StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.errorNodeNotPermitted) + .setDatanode(datanodeDetails) + .setClusterID(reconContext.getClusterId()) + .build(); + } } public void updateNodeOperationalStateFromScm(HddsProtos.Node scmNode, @@ -321,4 +349,61 @@ public long getNodeDBKeyCount() throws IOException { return nodeCount; } } + + /** + * Remove an existing node from the NodeDB. Explicit removal from admin user. + * First this API call removes the node info from NodeManager memory and + * if successful, then remove the node finally from NODES table as well. + * + * @param datanodeDetails Datanode details. 
+ */ + @Override + public void removeNode(DatanodeDetails datanodeDetails) throws NodeNotFoundException, IOException { + try { + super.removeNode(datanodeDetails); + nodeDB.delete(datanodeDetails.getUuid()); + } catch (IOException ioException) { + LOG.error("Node {} deletion fails from Node DB.", datanodeDetails.getUuid()); + throw ioException; + } + datanodeHeartbeatMap.remove(datanodeDetails.getUuid()); + inMemDatanodeDetails.remove(datanodeDetails.getUuid()); + LOG.info("Removed existing node {} from Node DB and NodeManager data structures in memory ", + datanodeDetails.getUuid()); + } + + @VisibleForTesting + public ReconContext getReconContext() { + return reconContext; + } + + @Override + protected void sendFinalizeToDatanodeIfNeeded(DatanodeDetails datanodeDetails, + LayoutVersionProto layoutVersionReport) { + // Recon should do nothing here. + int scmSlv = getLayoutVersionManager().getSoftwareLayoutVersion(); + int scmMlv = getLayoutVersionManager().getMetadataLayoutVersion(); + int dnSlv = layoutVersionReport.getSoftwareLayoutVersion(); + int dnMlv = layoutVersionReport.getMetadataLayoutVersion(); + + if (dnSlv > scmSlv) { + LOG.error("Invalid data node reporting to Recon : {}. " + + "DataNode SoftwareLayoutVersion = {}, Recon/SCM " + + "SoftwareLayoutVersion = {}", + datanodeDetails.getHostName(), dnSlv, scmSlv); + } + + if (scmMlv == scmSlv) { + // Recon metadata is finalised. + if (dnMlv < scmMlv) { + if (LOG.isDebugEnabled()) { + LOG.debug("Data node {} reports a lower MLV than Recon " + + "DataNode MetadataLayoutVersion = {}, Recon/SCM " + + "MetadataLayoutVersion = {}. SCM needs to finalize this DN", + datanodeDetails.getHostName(), dnMlv, scmMlv); + } + } + } + + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index 046662398f1b..9f6bfcef0091 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -89,6 +89,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.recon.ReconContext; import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.fsck.ContainerHealthTask; @@ -147,6 +148,9 @@ public class ReconStorageContainerManagerFacade private final ReconDatanodeProtocolServer datanodeProtocolServer; private final EventQueue eventQueue; private final SCMContext scmContext; + // This will hold the recon related information like health status and errors in initialization of modules if any, + // which can later be used for alerts integration or displaying some meaningful info to user on Recon UI. 
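The ReconContext introduced in this change is only exercised indirectly by the diff: register() sets the health flag and records or clears INVALID_NETWORK_TOPOLOGY. The sketch below restates that contract in a self-contained form; ReconContextSketch and its fields are hypothetical stand-ins inferred from the calls above (updateHealthStatus, getErrors), not the actual ReconContext class.

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Hypothetical sketch of the state ReconContext appears to carry in this
 * change: an overall health flag plus a list of error codes that modules
 * such as ReconNodeManager can set and clear.
 */
final class ReconContextSketch {
  enum ErrorCode { INVALID_NETWORK_TOPOLOGY }

  private final AtomicBoolean healthy = new AtomicBoolean(true);
  private final List<ErrorCode> errors = new CopyOnWriteArrayList<>();

  void updateHealthStatus(AtomicBoolean status) {
    healthy.set(status.get());
  }

  List<ErrorCode> getErrors() {
    return errors;
  }

  boolean isHealthy() {
    return healthy.get();
  }

  public static void main(String[] args) {
    ReconContextSketch ctx = new ReconContextSketch();
    // Registration failed because of an invalid topology: record the error.
    ctx.updateHealthStatus(new AtomicBoolean(false));
    ctx.getErrors().add(ErrorCode.INVALID_NETWORK_TOPOLOGY);
    // A later successful registration clears it again.
    ctx.updateHealthStatus(new AtomicBoolean(true));
    ctx.getErrors().remove(ErrorCode.INVALID_NETWORK_TOPOLOGY);
    System.out.println("healthy=" + ctx.isHealthy() + ", errors=" + ctx.getErrors());
  }
}
```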
+ private final ReconContext reconContext; private final SCMStorageConfig scmStorageConfig; private final SCMNodeDetails reconNodeDetails; private final SCMHAManager scmhaManager; @@ -175,18 +179,20 @@ public class ReconStorageContainerManagerFacade @Inject @SuppressWarnings({"checkstyle:ParameterNumber", "checkstyle:MethodLength"}) public ReconStorageContainerManagerFacade(OzoneConfiguration conf, - StorageContainerServiceProvider scmServiceProvider, - ReconTaskStatusDao reconTaskStatusDao, - ContainerCountBySizeDao containerCountBySizeDao, - UtilizationSchemaDefinition utilizationSchemaDefinition, - ContainerHealthSchemaManager containerHealthSchemaManager, - ReconContainerMetadataManager reconContainerMetadataManager, - ReconUtils reconUtils, - ReconSafeModeManager safeModeManager) throws IOException { + StorageContainerServiceProvider scmServiceProvider, + ReconTaskStatusDao reconTaskStatusDao, + ContainerCountBySizeDao containerCountBySizeDao, + UtilizationSchemaDefinition utilizationSchemaDefinition, + ContainerHealthSchemaManager containerHealthSchemaManager, + ReconContainerMetadataManager reconContainerMetadataManager, + ReconUtils reconUtils, + ReconSafeModeManager safeModeManager, + ReconContext reconContext) throws IOException { reconNodeDetails = reconUtils.getReconNodeDetails(conf); this.threadNamePrefix = reconNodeDetails.threadNamePrefix(); this.eventQueue = new EventQueue(threadNamePrefix); eventQueue.setSilent(true); + this.reconContext = reconContext; this.scmContext = new SCMContext.Builder() .setIsPreCheckComplete(true) .setSCM(this) @@ -220,10 +226,11 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, true, new SCMDBTransactionBufferImpl()); this.sequenceIdGen = new SequenceIdGenerator( conf, scmhaManager, ReconSCMDBDefinition.SEQUENCE_ID.getTable(dbStore)); + reconContext.setClusterId(scmStorageConfig.getClusterID()); this.nodeManager = new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap, ReconSCMDBDefinition.NODES.getTable(dbStore), - this.scmLayoutVersionManager); + this.scmLayoutVersionManager, reconContext); placementMetrics = SCMContainerPlacementMetrics.create(); this.containerPlacementPolicy = ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java index 6cb93e7134a2..ea0ff6ed5df4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import java.io.IOException; @@ -45,4 +46,6 @@ void batchStoreNSSummaries(BatchOperation batch, long objectId, void commitBatchOperation(RDBBatchOperation rdbBatchOperation) throws IOException; + + void rebuildNSSummaryTree(OMMetadataManager omMetadataManager); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java index 
42a30095f315..9167854a8263 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java @@ -22,8 +22,11 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTask; + import static org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider.truncateTable; import javax.inject.Inject; @@ -39,12 +42,14 @@ public class ReconNamespaceSummaryManagerImpl private Table nsSummaryTable; private DBStore namespaceDbStore; + private NSSummaryTask nsSummaryTask; @Inject - public ReconNamespaceSummaryManagerImpl(ReconDBProvider reconDBProvider) + public ReconNamespaceSummaryManagerImpl(ReconDBProvider reconDBProvider, NSSummaryTask nsSummaryTask) throws IOException { namespaceDbStore = reconDBProvider.getDbStore(); this.nsSummaryTable = NAMESPACE_SUMMARY.getTable(namespaceDbStore); + this.nsSummaryTask = nsSummaryTask; } @Override @@ -81,6 +86,11 @@ public void commitBatchOperation(RDBBatchOperation rdbBatchOperation) this.namespaceDbStore.commitBatchOperation(rdbBatchOperation); } + @Override + public void rebuildNSSummaryTree(OMMetadataManager omMetadataManager) { + nsSummaryTask.reprocess(omMetadataManager); + } + public Table getNSSummaryTable() { return nsSummaryTable; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java index fb387861f0e3..105406f2bdf6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.scm.ReconScmTask; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.ContainerCountBySizeDao; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; @@ -34,13 +35,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.ArrayList; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; import static org.hadoop.ozone.recon.schema.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; @@ -60,6 +62,8 @@ public class ContainerSizeCountTask extends ReconScmTask { private ContainerCountBySizeDao containerCountBySizeDao; private DSLContext dslContext; private HashMap processedContainers = new HashMap<>(); + private Map> + unhealthyContainerStateStatsMap; private ReadWriteLock lock = new ReentrantReadWriteLock(true); public ContainerSizeCountTask( @@ -121,7 +125,17 @@ protected synchronized void run() { private void 
process(ContainerInfo container, Map map) { final ContainerID id = container.containerID(); - final long currentSize = container.getUsedBytes(); + final long usedBytes = container.getUsedBytes(); + final long currentSize; + + if (usedBytes < 0) { + LOG.warn("Negative usedBytes ({}) for container {}, treating it as 0", + usedBytes, id); + currentSize = 0; + } else { + currentSize = usedBytes; + } + final Long previousSize = processedContainers.put(id, currentSize); if (previousSize != null) { decrementContainerSizeCount(previousSize, map); @@ -132,24 +146,27 @@ private void process(ContainerInfo container, /** * The process() function is responsible for updating the counts of * containers being tracked in a containerSizeCountMap based on the - * ContainerInfo objects in the list containers.It then iterates through + * ContainerInfo objects in the list containers. It then iterates through * the list of containers and does the following for each container: * - * 1) If the container is not present in processedContainers, - * it is a new container, so it is added to the processedContainers map - * and the count for its size in the containerSizeCountMap is incremented - * by 1 using the handlePutKeyEvent() function. - * 2) If the container is present in processedContainers but its size has - * been updated to the new size then the count for the old size in the - * containerSizeCountMap is decremented by 1 using the - * handleDeleteKeyEvent() function. The count for the new size is then - * incremented by 1 using the handlePutKeyEvent() function. - * 3) If the container is not present in containers list, it means the - * container has been deleted. - * The remaining containers inside the deletedContainers map are the ones - * that are not in the cluster and need to be deleted. Finally, the counts in - * the containerSizeCountMap are written to the database using the - * writeCountsToDB() function. + * 1) If the container's state is not "deleted," it will be processed: + * - If the container is not present in processedContainers, it is a new + * container. Therefore, it is added to the processedContainers map, and + * the count for its size in the containerSizeCountMap is incremented by + * 1 using the handlePutKeyEvent() function. + * - If the container is present in processedContainers but its size has + * been updated to a new size, the count for the old size in the + * containerSizeCountMap is decremented by 1 using the + * handleDeleteKeyEvent() function. Subsequently, the count for the new + * size is incremented by 1 using the handlePutKeyEvent() function. + * + * 2) If the container's state is "deleted," it is skipped, as deleted + * containers are not processed. + * + * After processing, the remaining containers inside the deletedContainers map + * are those that are not in the cluster and need to be deleted from the total + * size counts. Finally, the counts in the containerSizeCountMap are written + * to the database using the writeCountsToDB() function. 
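A self-contained sketch of the counting scheme this javadoc describes: deleted containers are skipped upstream, a new container increments the bin for its size, a resized container leaves its old bin and enters the new one, and a container that disappears is decremented. The power-of-two binning and the class name are illustrative assumptions, not Recon's actual ReconUtils.getContainerSizeUpperBound implementation.

```java
import java.util.HashMap;
import java.util.Map;

/** Illustrative re-statement of the per-size-bin container counting described above. */
final class ContainerSizeBinningSketch {
  private final Map<Long, Long> processed = new HashMap<>();   // containerId -> last seen size
  private final Map<Long, Long> countPerBin = new HashMap<>(); // bin upper bound -> container count

  /** 0 stays in its own bin; otherwise round up to the next power of two (assumed binning). */
  private static long bin(long size) {
    if (size <= 0) {
      return 0;
    }
    long b = 1;
    while (b < size) {
      b <<= 1;
    }
    return b;
  }

  void put(long containerId, long usedBytes) {
    long size = Math.max(usedBytes, 0);      // negative sizes are clamped to 0, as in the task
    Long previous = processed.put(containerId, size);
    if (previous != null) {
      countPerBin.merge(bin(previous), -1L, Long::sum);  // size changed: leave the old bin
    }
    countPerBin.merge(bin(size), 1L, Long::sum);         // enter the new bin
  }

  void delete(long containerId) {
    Long previous = processed.remove(containerId);
    if (previous != null) {
      countPerBin.merge(bin(previous), -1L, Long::sum);
    }
  }

  public static void main(String[] args) {
    ContainerSizeBinningSketch sketch = new ContainerSizeBinningSketch();
    sketch.put(1L, 3_000L);    // container 1 counted in the 4096 bin
    sketch.put(2L, 3_000L);    // container 2 joins the same bin
    sketch.put(1L, 10_000L);   // container 1 moves to the 16384 bin
    sketch.delete(2L);         // container 2 disappeared from SCM's container list
    System.out.println(sketch.countPerBin); // {4096=0, 16384=1}
  }
}
```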
*/ public void process(List containers) { lock.writeLock().lock(); @@ -161,7 +178,9 @@ public void process(List containers) { // Loop to handle container create and size-update operations for (ContainerInfo container : containers) { - // The containers present in the cache hence it is not yet deleted + if (container.getState().equals(DELETED)) { + continue; // Skip deleted containers + } deletedContainers.remove(container.containerID()); // For New Container being created try { @@ -246,10 +265,10 @@ public String getTaskName() { /** * - * The handleContainerDeleteOperations() function loops through the entries - * in the deletedContainers map and calls the handleDeleteKeyEvent() function - * for each one. This will decrement the size counts of those containers by - * one which are no longer present in the cluster + * Handles the deletion of containers by updating the tracking of processed containers + * and adjusting the count of containers based on their sizes. When a container is deleted, + * it is removed from the tracking of processed containers, and the count of containers + * corresponding to its size is decremented in the container size count map. * * Used by process() * @@ -261,6 +280,9 @@ private void handleContainerDeleteOperations( Map containerSizeCountMap) { for (Map.Entry containerId : deletedContainers.entrySet()) { + // processedContainers will only keep a track of all containers that have + // been processed except DELETED containers. + processedContainers.remove(containerId.getKey()); long containerSize = deletedContainers.get(containerId.getKey()); decrementContainerSizeCount(containerSize, containerSizeCountMap); } @@ -316,19 +338,26 @@ private static void updateContainerSizeCount(long containerSize, int delta, } /** - * * The purpose of this function is to categorize containers into different * size ranges, or "bins," based on their size. * The ContainerSizeCountKey object is used to store the upper bound value * for each size range, and is later used to lookup the count of containers * in that size range within a Map. * - * Used by decrementContainerSizeCount() and incrementContainerSizeCount() + * If the container size is 0, the method sets the size of + * ContainerSizeCountKey as zero without calculating the upper bound. Used by + * decrementContainerSizeCount() and incrementContainerSizeCount() * * @param containerSize to calculate the upperSizeBound */ private static ContainerSizeCountKey getContainerSizeCountKey( long containerSize) { + // If containerSize is 0, return a ContainerSizeCountKey with size 0 + if (containerSize == 0) { + return new ContainerSizeCountKey(0L); + } + + // Otherwise, calculate the upperSizeBound return new ContainerSizeCountKey( ReconUtils.getContainerSizeUpperBound(containerSize)); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java new file mode 100644 index 000000000000..5a6d7a256e49 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Manages records in the Deleted Table, updating counts and sizes of + * pending Key Deletions in the backend. + */ +public class DeletedKeysInsightHandler implements OmTableHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(DeletedKeysInsightHandler.class); + + /** + * Invoked by the process method to add information on those keys that have + * been backlogged in the backend for deletion. + */ + @Override + public void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + result.getLeft()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + result.getRight()); + } else { + LOG.warn("Put event does not have the Key Info for {}.", + event.getKey()); + } + + } + + /** + * Invoked by the process method to remove information on those keys that have + * been successfully deleted from the backend. + */ + @Override + public void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> + count > 0 ? count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > result.getRight() ? size - result.getRight() : + 0L); + } else { + LOG.warn("Delete event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to update the statistics on the keys + * pending to be deleted. 
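To make the bookkeeping concrete, here is a stripped-down sketch of the three per-table aggregates the handler maintains via computeIfPresent; the key formats and the DeletedKeyAggregatesSketch class are illustrative, while the floor-at-zero behaviour mirrors the delete handler above.

```java
import java.util.HashMap;
import java.util.Map;

/** Illustrative version of the deleted-key aggregates maintained per table. */
final class DeletedKeyAggregatesSketch {
  private final Map<String, Long> objectCount = new HashMap<>();
  private final Map<String, Long> unReplicatedSize = new HashMap<>();
  private final Map<String, Long> replicatedSize = new HashMap<>();

  DeletedKeyAggregatesSketch(String tableName) {
    // The real task seeds these keys before events arrive; computeIfPresent
    // in the handler only updates keys that already exist.
    objectCount.put(countKey(tableName), 0L);
    unReplicatedSize.put(sizeKey(tableName, false), 0L);
    replicatedSize.put(sizeKey(tableName, true), 0L);
  }

  static String countKey(String table) {
    return table + "Count";  // hypothetical key format
  }

  static String sizeKey(String table, boolean replicated) {
    return table + (replicated ? "ReplicatedDataSize" : "UnReplicatedDataSize"); // hypothetical
  }

  /** A put event adds the number of keys and both size totals. */
  void onPut(String table, int numKeys, long unReplicated, long replicated) {
    objectCount.computeIfPresent(countKey(table), (k, c) -> c + numKeys);
    unReplicatedSize.computeIfPresent(sizeKey(table, false), (k, s) -> s + unReplicated);
    replicatedSize.computeIfPresent(sizeKey(table, true), (k, s) -> s + replicated);
  }

  /** A delete event subtracts the same quantities, never going below zero. */
  void onDelete(String table, int numKeys, long unReplicated, long replicated) {
    objectCount.computeIfPresent(countKey(table), (k, c) -> Math.max(c - numKeys, 0));
    unReplicatedSize.computeIfPresent(sizeKey(table, false), (k, s) -> Math.max(s - unReplicated, 0));
    replicatedSize.computeIfPresent(sizeKey(table, true), (k, s) -> Math.max(s - replicated, 0));
  }

  public static void main(String[] args) {
    DeletedKeyAggregatesSketch agg = new DeletedKeyAggregatesSketch("deletedTable");
    agg.onPut("deletedTable", 3, 300L, 900L);    // 3 keys pending deletion, 3x replication
    agg.onDelete("deletedTable", 1, 100L, 300L); // 1 key purged from the backend
    System.out.println(agg.objectCount + " " + agg.unReplicatedSize + " " + agg.replicatedSize);
  }
}
```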
+ */ + @Override + public void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + // The size of deleted keys cannot change hence no-op. + return; + } + + /** + * Invoked by the reprocess method to calculate the records count of the + * deleted table and the sizes of replicated and unreplicated keys that are + * pending deletion in Ozone. + */ + @Override + public Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv + .getValue(); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSize += result.getRight(); + replicatedSize += result.getLeft(); + count += repeatedOmKeyInfo.getOmKeyInfoList().size(); + } + } + } + return Triple.of(count, unReplicatedSize, replicatedSize); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 42356191c501..30fdb7c1292e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -38,6 +38,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; /** * Task to query data from OMDB and write into Recon RocksDB. @@ -62,12 +63,13 @@ */ public class NSSummaryTask implements ReconOmTask { private static final Logger LOG = - LoggerFactory.getLogger(NSSummaryTask.class); + LoggerFactory.getLogger(NSSummaryTask.class); private final ReconNamespaceSummaryManager reconNamespaceSummaryManager; private final ReconOMMetadataManager reconOMMetadataManager; private final NSSummaryTaskWithFSO nsSummaryTaskWithFSO; private final NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy; + private final NSSummaryTaskWithOBS nsSummaryTaskWithOBS; private final OzoneConfiguration ozoneConfiguration; @Inject @@ -86,6 +88,9 @@ public NSSummaryTask(ReconNamespaceSummaryManager this.nsSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( reconNamespaceSummaryManager, reconOMMetadataManager, ozoneConfiguration); + this.nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); } @Override @@ -95,20 +100,28 @@ public String getTaskName() { @Override public Pair process(OMUpdateEventBatch events) { - boolean success; - success = nsSummaryTaskWithFSO.processWithFSO(events); - if (success) { - success = nsSummaryTaskWithLegacy.processWithLegacy(events); - } else { + boolean success = nsSummaryTaskWithFSO.processWithFSO(events); + if (!success) { LOG.error("processWithFSO failed."); } + success = nsSummaryTaskWithLegacy.processWithLegacy(events); + if (!success) { + LOG.error("processWithLegacy failed."); + } + success = nsSummaryTaskWithOBS.processWithOBS(events); + if (!success) { + LOG.error("processWithOBS failed."); + } return new ImmutablePair<>(getTaskName(), success); } @Override public Pair reprocess(OMMetadataManager omMetadataManager) { + // Initialize a list of tasks to run in parallel Collection> tasks = new ArrayList<>(); + 
long startTime = System.nanoTime(); // Record start time + try { // reinit Recon RocksDB's namespace CF. reconNamespaceSummaryManager.clearNSSummaryTable(); @@ -122,6 +135,8 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { .reprocessWithFSO(omMetadataManager)); tasks.add(() -> nsSummaryTaskWithLegacy .reprocessWithLegacy(reconOMMetadataManager)); + tasks.add(() -> nsSummaryTaskWithOBS + .reprocessWithOBS(reconOMMetadataManager)); List> results; ThreadFactory threadFactory = new ThreadFactoryBuilder() @@ -137,17 +152,24 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { } } } catch (InterruptedException ex) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. ", ex); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex); return new ImmutablePair<>(getTaskName(), false); } catch (ExecutionException ex2) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. ", ex2); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex2); return new ImmutablePair<>(getTaskName(), false); } finally { executorService.shutdown(); + + long endTime = System.nanoTime(); + // Convert to milliseconds + long durationInMillis = + TimeUnit.NANOSECONDS.toMillis(endTime - startTime); + + // Log performance metrics + LOG.info("Task execution time: {} milliseconds", durationInMillis); } + return new ImmutablePair<>(getTaskName(), true); } -} +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index f00d83e64a52..888ec5319f2f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -132,6 +132,8 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, curNSSummary = new NSSummary(); } curNSSummary.setDirName(dirName); + // Set the parent directory ID + curNSSummary.setParentId(parentObjectId); nsSummaryMap.put(objectId, curNSSummary); // Write the child dir list to the parent directory diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index ec1ccd0542fc..4555b976ffed 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -47,7 +47,7 @@ */ public class NSSummaryTaskWithLegacy extends NSSummaryTaskDbEventHandler { - private static final BucketLayout BUCKET_LAYOUT = BucketLayout.LEGACY; + private static final BucketLayout LEGACY_BUCKET_LAYOUT = BucketLayout.LEGACY; private static final Logger LOG = LoggerFactory.getLogger(NSSummaryTaskWithLegacy.class); @@ -71,16 +71,17 @@ public NSSummaryTaskWithLegacy(ReconNamespaceSummaryManager public boolean processWithLegacy(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); Map nsSummaryMap = new HashMap<>(); + ReconOMMetadataManager metadataManager = getReconOMMetadataManager(); while (eventIterator.hasNext()) { - OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); + OMDBUpdateEvent omdbUpdateEvent = + eventIterator.next(); OMDBUpdateEvent.OMDBUpdateAction action = 
omdbUpdateEvent.getAction(); // we only process updates on OM's KeyTable String table = omdbUpdateEvent.getTable(); - boolean updateOnKeyTable = table.equals(KEY_TABLE); - if (!updateOnKeyTable) { + + if (!table.equals(KEY_TABLE)) { continue; } @@ -90,102 +91,26 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent; Object value = keyTableUpdateEvent.getValue(); Object oldValue = keyTableUpdateEvent.getOldValue(); + if (!(value instanceof OmKeyInfo)) { LOG.warn("Unexpected value type {} for key {}. Skipping processing.", value.getClass().getName(), updatedKey); continue; } + OmKeyInfo updatedKeyInfo = (OmKeyInfo) value; OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue; - // KeyTable entries belong to both Legacy and OBS buckets. - // Check bucket layout and if it's OBS - // continue to the next iteration. - // Check just for the current KeyInfo. - String volumeName = updatedKeyInfo.getVolumeName(); - String bucketName = updatedKeyInfo.getBucketName(); - String bucketDBKey = getReconOMMetadataManager() - .getBucketKey(volumeName, bucketName); - // Get bucket info from bucket table - OmBucketInfo omBucketInfo = getReconOMMetadataManager() - .getBucketTable().getSkipCache(bucketDBKey); - - if (omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + if (!isBucketLayoutValid(metadataManager, updatedKeyInfo)) { continue; } - setKeyParentID(updatedKeyInfo); - - if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - switch (action) { - case PUT: - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - case DELETE: - handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - case UPDATE: - if (oldKeyInfo != null) { - // delete first, then put - setKeyParentID(oldKeyInfo); - handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); - } else { - LOG.warn("Update event does not have the old keyInfo for {}.", - updatedKey); - } - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - default: - LOG.debug("Skipping DB update event : {}", - omdbUpdateEvent.getAction()); - } + if (enableFileSystemPaths) { + processWithFileSystemLayout(updatedKeyInfo, oldKeyInfo, action, + nsSummaryMap); } else { - OmDirectoryInfo updatedDirectoryInfo = - new OmDirectoryInfo.Builder() - .setName(updatedKeyInfo.getKeyName()) - .setObjectID(updatedKeyInfo.getObjectID()) - .setParentObjectID(updatedKeyInfo.getParentObjectID()) - .build(); - - OmDirectoryInfo oldDirectoryInfo = null; - - if (oldKeyInfo != null) { - oldDirectoryInfo = - new OmDirectoryInfo.Builder() - .setName(oldKeyInfo.getKeyName()) - .setObjectID(oldKeyInfo.getObjectID()) - .setParentObjectID(oldKeyInfo.getParentObjectID()) - .build(); - } - - switch (action) { - case PUT: - handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - case DELETE: - handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - case UPDATE: - if (oldDirectoryInfo != null) { - // delete first, then put - handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap); - } else { - LOG.warn("Update event does not have the old dirInfo for {}.", - updatedKey); - } - handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - default: - LOG.debug("Skipping DB update event : {}", - omdbUpdateEvent.getAction()); - } + processWithObjectStoreLayout(updatedKeyInfo, oldKeyInfo, action, + nsSummaryMap); } } catch (IOException ioEx) { LOG.error("Unable to process Namespace Summary data in Recon DB. 
", @@ -206,12 +131,118 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { return true; } + private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo, + OmKeyInfo oldKeyInfo, + OMDBUpdateEvent.OMDBUpdateAction action, + Map nsSummaryMap) + throws IOException { + setKeyParentID(updatedKeyInfo); + + if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldKeyInfo != null) { + setKeyParentID(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Key: {}", action); + } + } else { + OmDirectoryInfo updatedDirectoryInfo = new OmDirectoryInfo.Builder() + .setName(updatedKeyInfo.getKeyName()) + .setObjectID(updatedKeyInfo.getObjectID()) + .setParentObjectID(updatedKeyInfo.getParentObjectID()) + .build(); + + OmDirectoryInfo oldDirectoryInfo = null; + + if (oldKeyInfo != null) { + oldDirectoryInfo = + new OmDirectoryInfo.Builder() + .setName(oldKeyInfo.getKeyName()) + .setObjectID(oldKeyInfo.getObjectID()) + .setParentObjectID(oldKeyInfo.getParentObjectID()) + .build(); + } + + switch (action) { + case PUT: + handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldDirectoryInfo != null) { + handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old dirInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Directory: {}", action); + } + } + } + + private void processWithObjectStoreLayout(OmKeyInfo updatedKeyInfo, + OmKeyInfo oldKeyInfo, + OMDBUpdateEvent.OMDBUpdateAction action, + Map nsSummaryMap) + throws IOException { + setParentBucketId(updatedKeyInfo); + + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldKeyInfo != null) { + setParentBucketId(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Key: {}", action); + } + } + public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { Map nsSummaryMap = new HashMap<>(); try { Table keyTable = - omMetadataManager.getKeyTable(BUCKET_LAYOUT); + omMetadataManager.getKeyTable(LEGACY_BUCKET_LAYOUT); try (TableIterator> keyTableIter = keyTable.iterator()) { @@ -223,30 +254,29 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { // KeyTable entries belong to both Legacy and OBS buckets. // Check bucket layout and if it's OBS // continue to the next iteration. 
- String volumeName = keyInfo.getVolumeName(); - String bucketName = keyInfo.getBucketName(); - String bucketDBKey = omMetadataManager - .getBucketKey(volumeName, bucketName); - // Get bucket info from bucket table - OmBucketInfo omBucketInfo = omMetadataManager - .getBucketTable().getSkipCache(bucketDBKey); - - if (omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + if (!isBucketLayoutValid((ReconOMMetadataManager) omMetadataManager, + keyInfo)) { continue; } - setKeyParentID(keyInfo); - - if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - OmDirectoryInfo directoryInfo = - new OmDirectoryInfo.Builder() - .setName(keyInfo.getKeyName()) - .setObjectID(keyInfo.getObjectID()) - .setParentObjectID(keyInfo.getParentObjectID()) - .build(); - handlePutDirEvent(directoryInfo, nsSummaryMap); + if (enableFileSystemPaths) { + // The LEGACY bucket is a file system bucket. + setKeyParentID(keyInfo); + + if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + OmDirectoryInfo directoryInfo = + new OmDirectoryInfo.Builder() + .setName(keyInfo.getKeyName()) + .setObjectID(keyInfo.getObjectID()) + .setParentObjectID(keyInfo.getParentObjectID()) + .build(); + handlePutDirEvent(directoryInfo, nsSummaryMap); + } else { + handlePutKeyEvent(keyInfo, nsSummaryMap); + } } else { + // The LEGACY bucket is an object store bucket. + setParentBucketId(keyInfo); handlePutKeyEvent(keyInfo, nsSummaryMap); } if (!checkAndCallFlushToDB(nsSummaryMap)) { @@ -290,7 +320,7 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), parentKeyName); OmKeyInfo parentKeyInfo = getReconOMMetadataManager() - .getKeyTable(BUCKET_LAYOUT) + .getKeyTable(LEGACY_BUCKET_LAYOUT) .getSkipCache(fullParentKeyName); if (parentKeyInfo != null) { @@ -300,17 +330,53 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { "NSSummaryTaskWithLegacy is null"); } } else { - String bucketKey = getReconOMMetadataManager() - .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); - OmBucketInfo parentBucketInfo = - getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + setParentBucketId(keyInfo); + } + } - if (parentBucketInfo != null) { - keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); - } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); - } + /** + * Set the parent object ID for a bucket. + *@paramkeyInfo + *@throwsIOException + */ + private void setParentBucketId(OmKeyInfo keyInfo) + throws IOException { + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithLegacy is null"); } } + + /** + * Check if the bucket layout is LEGACY. 
+ * @param metadataManager + * @param keyInfo + * @return + */ + private boolean isBucketLayoutValid(ReconOMMetadataManager metadataManager, + OmKeyInfo keyInfo) + throws IOException { + String volumeName = keyInfo.getVolumeName(); + String bucketName = keyInfo.getBucketName(); + String bucketDBKey = metadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = + metadataManager.getBucketTable().getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != LEGACY_BUCKET_LAYOUT) { + LOG.debug( + "Skipping processing for bucket {} as bucket layout is not LEGACY", + bucketName); + return false; + } + + return true; + } + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java new file mode 100644 index 000000000000..34c7dc967c3a --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -0,0 +1,236 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + + +/** + * Class for handling OBS specific tasks. + */ +public class NSSummaryTaskWithOBS extends NSSummaryTaskDbEventHandler { + + private static final BucketLayout BUCKET_LAYOUT = BucketLayout.OBJECT_STORE; + + private static final Logger LOG = + LoggerFactory.getLogger(NSSummaryTaskWithOBS.class); + + + public NSSummaryTaskWithOBS( + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager reconOMMetadataManager, + OzoneConfiguration ozoneConfiguration) { + super(reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + } + + + public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { + Map nsSummaryMap = new HashMap<>(); + + try { + Table keyTable = + omMetadataManager.getKeyTable(BUCKET_LAYOUT); + + try (TableIterator> + keyTableIter = keyTable.iterator()) { + + while (keyTableIter.hasNext()) { + Table.KeyValue kv = keyTableIter.next(); + OmKeyInfo keyInfo = kv.getValue(); + + // KeyTable entries belong to both Legacy and OBS buckets. + // Check bucket layout and if it's anything other than OBS, + // continue to the next iteration. + String volumeName = keyInfo.getVolumeName(); + String bucketName = keyInfo.getBucketName(); + String bucketDBKey = omMetadataManager + .getBucketKey(volumeName, bucketName); + // Get bucket info from bucket table + OmBucketInfo omBucketInfo = omMetadataManager + .getBucketTable().getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + continue; + } + + setKeyParentID(keyInfo); + + handlePutKeyEvent(keyInfo, nsSummaryMap); + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } + } + } catch (IOException ioEx) { + LOG.error("Unable to reprocess Namespace Summary data in Recon DB. 
", + ioEx); + return false; + } + + // flush and commit left out entries at end + if (!flushAndCommitNSToDB(nsSummaryMap)) { + return false; + } + LOG.info("Completed a reprocess run of NSSummaryTaskWithOBS"); + return true; + } + + public boolean processWithOBS(OMUpdateEventBatch events) { + Iterator eventIterator = events.getIterator(); + Map nsSummaryMap = new HashMap<>(); + + while (eventIterator.hasNext()) { + OMDBUpdateEvent omdbUpdateEvent = + eventIterator.next(); + OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction(); + + // We only process updates on OM's KeyTable + String table = omdbUpdateEvent.getTable(); + boolean updateOnKeyTable = table.equals(KEY_TABLE); + if (!updateOnKeyTable) { + continue; + } + + String updatedKey = omdbUpdateEvent.getKey(); + + try { + OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent; + Object value = keyTableUpdateEvent.getValue(); + Object oldValue = keyTableUpdateEvent.getOldValue(); + if (value == null) { + LOG.warn("Value is null for key {}. Skipping processing.", + updatedKey); + continue; + } else if (!(value instanceof OmKeyInfo)) { + LOG.warn("Unexpected value type {} for key {}. Skipping processing.", + value.getClass().getName(), updatedKey); + continue; + } + + OmKeyInfo updatedKeyInfo = (OmKeyInfo) value; + OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue; + + // KeyTable entries belong to both OBS and Legacy buckets. + // Check bucket layout and if it's anything other than OBS, + // continue to the next iteration. + String volumeName = updatedKeyInfo.getVolumeName(); + String bucketName = updatedKeyInfo.getBucketName(); + String bucketDBKey = + getReconOMMetadataManager().getBucketKey(volumeName, bucketName); + // Get bucket info from bucket table + OmBucketInfo omBucketInfo = getReconOMMetadataManager().getBucketTable() + .getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + continue; + } + + setKeyParentID(updatedKeyInfo); + + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + case UPDATE: + if (oldKeyInfo != null) { + // delete first, then put + setKeyParentID(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKey); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + default: + LOG.debug("Skipping DB update event: {}", action); + } + + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } catch (IOException ioEx) { + LOG.error("Unable to process Namespace Summary data in Recon DB. ", + ioEx); + return false; + } + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } + + // Flush and commit left-out entries at the end + if (!flushAndCommitNSToDB(nsSummaryMap)) { + return false; + } + + LOG.info("Completed a process run of NSSummaryTaskWithOBS"); + return true; + } + + + /** + * KeyTable entries don't have the parentId set. + * In order to reuse the existing methods that rely on + * the parentId, we have to set it explicitly. + * Note: For an OBS key, the parentId will always correspond to the ID of the + * OBS bucket in which it is located. 
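Both the legacy and OBS handlers treat an UPDATE event as a delete of the old key info followed by a put of the new one, which keeps aggregate sizes correct when a key is overwritten. The sketch below shows that ordering with a plain map standing in for the NSSummary size totals; it is illustrative, not the task's actual flush logic.

```java
import java.util.HashMap;
import java.util.Map;

/** Minimal illustration of the delete-then-put handling of UPDATE events. */
final class UpdateAsDeleteThenPutSketch {
  // parentObjectId -> total bytes under that parent (stand-in for NSSummary sizes)
  private final Map<Long, Long> sizeOfFiles = new HashMap<>();

  void put(long parentId, long dataSize) {
    sizeOfFiles.merge(parentId, dataSize, Long::sum);
  }

  void delete(long parentId, long dataSize) {
    sizeOfFiles.merge(parentId, -dataSize, Long::sum);
  }

  /** An overwrite must remove the old key's contribution before adding the new one. */
  void update(long parentId, long oldSize, long newSize) {
    delete(parentId, oldSize);
    put(parentId, newSize);
  }

  public static void main(String[] args) {
    UpdateAsDeleteThenPutSketch ns = new UpdateAsDeleteThenPutSketch();
    ns.put(100L, 4_096L);            // key created under bucket/parent 100
    ns.update(100L, 4_096L, 8_192L); // key overwritten with a larger value
    System.out.println(ns.sizeOfFiles); // {100=8192}, not 12288
  }
}
```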
+ * + * @param keyInfo + * @throws IOException + */ + private void setKeyParentID(OmKeyInfo keyInfo) + throws IOException { + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithOBS is null"); + } + } + +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java new file mode 100644 index 000000000000..5ae23b68a703 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Interface for handling PUT, DELETE and UPDATE events for size-related + * tables for OM Insights. + */ +public interface OmTableHandler { + + /** + * Handles a PUT event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The PUT event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Handles a DELETE event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The DELETE event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Handles an UPDATE event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The UPDATE event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Returns a triple with the total count of records (left), total unreplicated + * size (middle), and total replicated size (right) in the given iterator. + * Increments count for each record and adds the dataSize if a record's value + * is an instance of OmKeyInfo,RepeatedOmKeyInfo. + * If the iterator is null, returns (0, 0, 0). + * + * @param iterator The iterator over the table to be iterated. + * @return A Triple with three Long values representing the count, + * unReplicated size and replicated size. + * @throws IOException If an I/O error occurs during the iterator traversal. + */ + Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException; + + + /** + * Returns the count key for the given table. + * + * @param tableName The name of the table. + * @return The count key for the table. 
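
[Editor's aside — not part of this patch] The default methods that follow derive the GlobalStats keys purely by string concatenation on the table name. A short sketch of the resulting keys, assuming the OM table-name constant `OPEN_KEY_TABLE` resolves to `"openKeyTable"`:

```java
// Editor's sketch of the key-naming convention defined by OmTableHandler's defaults.
OmTableHandler handler = new OpenKeysInsightHandler();
String countKey  = handler.getTableCountKeyFromTable("openKeyTable");
// -> "openKeyTableCount"
String unReplKey = handler.getUnReplicatedSizeKeyFromTable("openKeyTable");
// -> "openKeyTableUnReplicatedDataSize"
String replKey   = handler.getReplicatedSizeKeyFromTable("openKeyTable");
// -> "openKeyTableReplicatedDataSize"
```

These are the keys that OmTableInsightTask seeds in initializeCountMap()/initializeSizeMap() and later upserts into GlobalStats via writeDataToDB().
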
+ */ + default String getTableCountKeyFromTable(String tableName) { + return tableName + "Count"; + } + + /** + * Returns the replicated size key for the given table. + * + * @param tableName The name of the table. + * @return The replicated size key for the table. + */ + default String getReplicatedSizeKeyFromTable(String tableName) { + return tableName + "ReplicatedDataSize"; + } + + /** + * Returns the unreplicated size key for the given table. + * + * @param tableName The name of the table. + * @return The unreplicated size key for the table. + */ + default String getUnReplicatedSizeKeyFromTable(String tableName) { + return tableName + "UnReplicatedDataSize"; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java index c814d9d9e33f..3e84f311c942 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java @@ -26,8 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; @@ -37,22 +35,20 @@ import java.io.IOException; import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; - - +import java.util.Collection; import java.util.Map.Entry; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.jooq.impl.DSL.currentTimestamp; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; +import static org.jooq.impl.DSL.currentTimestamp; /** * Class to iterate over the OM DB and store the total counts of volumes, @@ -65,14 +61,21 @@ public class OmTableInsightTask implements ReconOmTask { private GlobalStatsDao globalStatsDao; private Configuration sqlConfiguration; private ReconOMMetadataManager reconOMMetadataManager; + private Map tableHandlers; @Inject public OmTableInsightTask(GlobalStatsDao globalStatsDao, - Configuration sqlConfiguration, - ReconOMMetadataManager reconOMMetadataManager) { + Configuration sqlConfiguration, + ReconOMMetadataManager reconOMMetadataManager) { this.globalStatsDao = globalStatsDao; this.sqlConfiguration = sqlConfiguration; this.reconOMMetadataManager = reconOMMetadataManager; + + // Initialize table handlers + tableHandlers = new HashMap<>(); + tableHandlers.put(OPEN_KEY_TABLE, new OpenKeysInsightHandler()); + tableHandlers.put(OPEN_FILE_TABLE, new OpenKeysInsightHandler()); + tableHandlers.put(DELETED_TABLE, new DeletedKeysInsightHandler()); } /** @@ -90,8 +93,8 @@ public OmTableInsightTask(GlobalStatsDao globalStatsDao, @Override public Pair reprocess(OMMetadataManager 
omMetadataManager) { HashMap objectCountMap = initializeCountMap(); - HashMap unReplicatedSizeCountMap = initializeSizeMap(false); - HashMap replicatedSizeCountMap = initializeSizeMap(true); + HashMap unReplicatedSizeMap = initializeSizeMap(false); + HashMap replicatedSizeMap = initializeSizeMap(true); for (String tableName : getTaskTables()) { Table table = omMetadataManager.getTable(tableName); @@ -100,16 +103,16 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { return new ImmutablePair<>(getTaskName(), false); } - try ( - TableIterator> iterator - = table.iterator()) { - if (getTablesToCalculateSize().contains(tableName)) { - Triple details = getTableSizeAndCount(iterator); + try (TableIterator> iterator + = table.iterator()) { + if (tableHandlers.containsKey(tableName)) { + Triple details = + tableHandlers.get(tableName).getTableSizeAndCount(iterator); objectCountMap.put(getTableCountKeyFromTable(tableName), details.getLeft()); - unReplicatedSizeCountMap.put( + unReplicatedSizeMap.put( getUnReplicatedSizeKeyFromTable(tableName), details.getMiddle()); - replicatedSizeCountMap.put(getReplicatedSizeKeyFromTable(tableName), + replicatedSizeMap.put(getReplicatedSizeKeyFromTable(tableName), details.getRight()); } else { long count = Iterators.size(iterator); @@ -124,72 +127,17 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unReplicatedSizeCountMap.isEmpty()) { - writeDataToDB(unReplicatedSizeCountMap); + if (!unReplicatedSizeMap.isEmpty()) { + writeDataToDB(unReplicatedSizeMap); } - if (!replicatedSizeCountMap.isEmpty()) { - writeDataToDB(replicatedSizeCountMap); + if (!replicatedSizeMap.isEmpty()) { + writeDataToDB(replicatedSizeMap); } LOG.info("Completed a 'reprocess' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); } - /** - * Returns a triple with the total count of records (left), total unreplicated - * size (middle), and total replicated size (right) in the given iterator. - * Increments count for each record and adds the dataSize if a record's value - * is an instance of OmKeyInfo. If the iterator is null, returns (0, 0, 0). - * - * @param iterator The iterator over the table to be iterated. - * @return A Triple with three Long values representing the count, - * unreplicated size and replicated size. - * @throws IOException If an I/O error occurs during the iterator traversal. - */ - private Triple getTableSizeAndCount( - TableIterator> iterator) - throws IOException { - long count = 0; - long unReplicatedSize = 0; - long replicatedSize = 0; - - if (iterator != null) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - if (kv != null && kv.getValue() != null) { - if (kv.getValue() instanceof OmKeyInfo) { - OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); - unReplicatedSize += omKeyInfo.getDataSize(); - replicatedSize += omKeyInfo.getReplicatedSize(); - count++; - } - if (kv.getValue() instanceof RepeatedOmKeyInfo) { - RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv - .getValue(); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unReplicatedSize += result.getRight(); - replicatedSize += result.getLeft(); - // Since we can have multiple deleted keys of same name - count += repeatedOmKeyInfo.getOmKeyInfoList().size(); - } - } - } - } - - return Triple.of(count, unReplicatedSize, replicatedSize); - } - - /** - * Returns a collection of table names that require data size calculation. 
- */ - public Collection getTablesToCalculateSize() { - List taskTables = new ArrayList<>(); - taskTables.add(OPEN_KEY_TABLE); - taskTables.add(OPEN_FILE_TABLE); - taskTables.add(DELETED_TABLE); - return taskTables; - } - @Override public String getTaskName() { return "OmTableInsightTask"; @@ -211,10 +159,9 @@ public Pair process(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); // Initialize maps to store count and size information HashMap objectCountMap = initializeCountMap(); - HashMap unreplicatedSizeCountMap = initializeSizeMap(false); - HashMap replicatedSizeCountMap = initializeSizeMap(true); + HashMap unReplicatedSizeMap = initializeSizeMap(false); + HashMap replicatedSizeMap = initializeSizeMap(true); final Collection taskTables = getTaskTables(); - final Collection sizeRelatedTables = getTablesToCalculateSize(); // Process each update event while (eventIterator.hasNext()) { @@ -223,22 +170,21 @@ public Pair process(OMUpdateEventBatch events) { if (!taskTables.contains(tableName)) { continue; } - try { switch (omdbUpdateEvent.getAction()) { case PUT: - handlePutEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handlePutEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; case DELETE: - handleDeleteEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handleDeleteEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; case UPDATE: - handleUpdateEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handleUpdateEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; default: @@ -256,11 +202,11 @@ public Pair process(OMUpdateEventBatch events) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unreplicatedSizeCountMap.isEmpty()) { - writeDataToDB(unreplicatedSizeCountMap); + if (!unReplicatedSizeMap.isEmpty()) { + writeDataToDB(unReplicatedSizeMap); } - if (!replicatedSizeCountMap.isEmpty()) { - writeDataToDB(replicatedSizeCountMap); + if (!replicatedSizeMap.isEmpty()) { + writeDataToDB(replicatedSizeMap); } LOG.info("Completed a 'process' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); @@ -268,65 +214,34 @@ public Pair process(OMUpdateEventBatch events) { private void handlePutEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - if (sizeRelatedTables.contains(tableName)) { - handleSizeRelatedTablePutEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); - } else { - String countKey = getTableCountKeyFromTable(tableName); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - } - } - - private void handleSizeRelatedTablePutEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() instanceof OmKeyInfo) { - // Handle PUT for OpenKeyTable & OpenFileTable - OmKeyInfo 
omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + omKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + omKeyInfo.getReplicatedSize()); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle PUT for DeletedTable - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + result.getLeft()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + result.getRight()); + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) + throws IOException { + OmTableHandler tableHandler = tableHandlers.get(tableName); + if (event.getValue() != null) { + if (tableHandler != null) { + tableHandler.handlePutEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); + } else { + String countKey = getTableCountKeyFromTable(tableName); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + } } } private void handleDeleteEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) + throws IOException { + OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (sizeRelatedTables.contains(tableName)) { - handleSizeRelatedTableDeleteEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); + if (tableHandler != null) { + tableHandler.handleDeleteEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); } else { String countKey = getTableCountKeyFromTable(tableName); objectCountMap.computeIfPresent(countKey, @@ -335,109 +250,28 @@ private void handleDeleteEvent(OMDBUpdateEvent event, } } - private void handleSizeRelatedTableDeleteEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() instanceof OmKeyInfo) { - // Handle DELETE for OpenKeyTable & OpenFileTable - OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? count - 1L : 0L); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > omKeyInfo.getDataSize() ? - size - omKeyInfo.getDataSize() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > omKeyInfo.getReplicatedSize() ? - size - omKeyInfo.getReplicatedSize() : 0L); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle DELETE for DeletedTable - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count > 0 ? 
- count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > result.getRight() ? size - result.getRight() : - 0L); - } - } private void handleUpdateEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (sizeRelatedTables.contains(tableName)) { + if (tableHandler != null) { // Handle update for only size related tables - handleSizeRelatedTableUpdateEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); + tableHandler.handleUpdateEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); } } } - - private void handleSizeRelatedTableUpdateEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - if (event.getOldValue() == null) { - LOG.warn("Update event does not have the old Key Info for {}.", - event.getKey()); - return; - } - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - // In Update event the count for the open table will not change. So we don't - // need to update the count. Except for RepeatedOmKeyInfo, for which the - // size of omKeyInfoList can change - if (event.getValue() instanceof OmKeyInfo) { - // Handle UPDATE for OpenKeyTable & OpenFileTable - OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); - OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldKeyInfo.getDataSize() + - newKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldKeyInfo.getReplicatedSize() + - newKeyInfo.getReplicatedSize()); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle UPDATE for DeletedTable - RepeatedOmKeyInfo oldRepeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getOldValue(); - RepeatedOmKeyInfo newRepeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? - count - oldRepeatedOmKeyInfo.getOmKeyInfoList().size() + - newRepeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair oldSize = oldRepeatedOmKeyInfo.getTotalSize(); - Pair newSize = newRepeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldSize.getLeft() + newSize.getLeft()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldSize.getRight() + newSize.getRight()); - } - } - - + /** + * Write the updated count and size information to the database. + * + * @param dataMap Map containing the updated count and size information. 
+ */ private void writeDataToDB(Map dataMap) { List insertGlobalStats = new ArrayList<>(); List updateGlobalStats = new ArrayList<>(); @@ -461,6 +295,11 @@ private void writeDataToDB(Map dataMap) { globalStatsDao.update(updateGlobalStats); } + /** + * Initializes and returns a count map with the counts for the tables. + * + * @return The count map containing the counts for each table. + */ private HashMap initializeCountMap() { Collection tables = getTaskTables(); HashMap objectCountMap = new HashMap<>(tables.size()); @@ -478,11 +317,13 @@ private HashMap initializeCountMap() { * @return The size map containing the size counts for each table. */ private HashMap initializeSizeMap(boolean replicated) { - Collection tables = getTablesToCalculateSize(); - HashMap sizeCountMap = new HashMap<>(tables.size()); - for (String tableName : tables) { - String key = replicated ? getReplicatedSizeKeyFromTable(tableName) : - getUnReplicatedSizeKeyFromTable(tableName); + HashMap sizeCountMap = new HashMap<>(); + for (Map.Entry entry : tableHandlers.entrySet()) { + String tableName = entry.getKey(); + OmTableHandler tableHandler = entry.getValue(); + String key = + replicated ? tableHandler.getReplicatedSizeKeyFromTable(tableName) : + tableHandler.getUnReplicatedSizeKeyFromTable(tableName); sizeCountMap.put(key, getValueForKey(key)); } return sizeCountMap; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java new file mode 100644 index 000000000000..7a27d29d8f28 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Manages records in the OpenKey Table, updating counts and sizes of + * open keys in the backend. + */ +public class OpenKeysInsightHandler implements OmTableHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(OpenKeysInsightHandler.class); + + /** + * Invoked by the process method to add information on those keys that have + * been open in the backend. + */ + @Override + public void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + omKeyInfo.getDataSize()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + omKeyInfo.getReplicatedSize()); + } else { + LOG.warn("Put event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to delete information on those keys that are + * no longer closed in the backend. + */ + @Override + public void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? count - 1L : 0L); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > omKeyInfo.getDataSize() ? + size - omKeyInfo.getDataSize() : 0L); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > omKeyInfo.getReplicatedSize() ? + size - omKeyInfo.getReplicatedSize() : 0L); + } else { + LOG.warn("Delete event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to update information on those open keys that + * have been updated in the backend. 
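
[Editor's aside — not part of this patch] The UPDATE handling that follows adjusts only the two size aggregates by the delta between the old and new key, while the open-key count is deliberately left untouched. A worked example with assumed starting values:

```java
// Editor's sketch: one open key grows from 100 to 150 bytes
// (replicated: 300 -> 450). All values here are assumptions for illustration.
HashMap<String, Long> unReplicatedSizeMap = new HashMap<>();
HashMap<String, Long> replicatedSizeMap = new HashMap<>();
unReplicatedSizeMap.put("openKeyTableUnReplicatedDataSize", 1_000L);
replicatedSizeMap.put("openKeyTableReplicatedDataSize", 3_000L);

long oldSize = 100L, newSize = 150L;   // from event.getOldValue() / event.getValue()
long oldRepl = 300L, newRepl = 450L;

// The count entry is not modified on UPDATE; only the sizes move by the delta:
unReplicatedSizeMap.computeIfPresent("openKeyTableUnReplicatedDataSize",
    (k, size) -> size - oldSize + newSize);   // 1_000 -> 1_050
replicatedSizeMap.computeIfPresent("openKeyTableReplicatedDataSize",
    (k, size) -> size - oldRepl + newRepl);   // 3_000 -> 3_150
```
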
+ */ + @Override + public void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + if (event.getValue() != null) { + if (event.getOldValue() == null) { + LOG.warn("Update event does not have the old Key Info for {}.", + event.getKey()); + return; + } + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + // In Update event the count for the open table will not change. So we + // don't need to update the count. + OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); + OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size - oldKeyInfo.getDataSize() + + newKeyInfo.getDataSize()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size - oldKeyInfo.getReplicatedSize() + + newKeyInfo.getReplicatedSize()); + } else { + LOG.warn("Update event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * This method is called by the reprocess method. It calculates the record + * counts for both the open key table and the open file table. Additionally, + * it computes the sizes of both replicated and unreplicated keys + * that are currently open in the backend. + */ + @Override + public Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); + unReplicatedSize += omKeyInfo.getDataSize(); + replicatedSize += omKeyInfo.getReplicatedSize(); + count++; + } + } + } + return Triple.of(count, unReplicatedSize, replicatedSize); + } + +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 204609f66fec..79ff9f8e7c6d 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -44,6 +44,42 @@ "replicationType": "RATIS", "replicationFactor": 1, "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc710", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc711", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc712", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc713", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc714", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc715", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" } ], "containers": 80, @@ -1000,7 +1036,7 @@ ] }, "keys": { - 
"totalCount": 534, + "totalCount": 15, "keys": [ { "Volume": "vol-0-20448", @@ -1062,7 +1098,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77506", "DataSize": 10240, "Versions": [ 0 @@ -1081,7 +1117,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64512", "DataSize": 5692407, "Versions": [ 0 @@ -1100,7 +1136,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69105", "DataSize": 189407, "Versions": [ 0 @@ -1119,7 +1155,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77507", "DataSize": 10240, "Versions": [ 0 @@ -1138,7 +1174,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64513", "DataSize": 5692407, "Versions": [ 0 @@ -1157,7 +1193,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69106", "DataSize": 189407, "Versions": [ 0 @@ -1176,7 +1212,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77508", "DataSize": 10240, "Versions": [ 0 @@ -1195,7 +1231,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64514", "DataSize": 5692407, "Versions": [ 0 @@ -1214,7 +1250,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69107", "DataSize": 189407, "Versions": [ 0 @@ -1233,7 +1269,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77509", "DataSize": 10240, "Versions": [ 0 @@ -1252,7 +1288,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64515", "DataSize": 5692407, "Versions": [ 0 @@ -1271,7 +1307,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69109", "DataSize": 189407, "Versions": [ 0 @@ -3729,7 +3765,6 @@ "totalDeletedKeys": 3 }, "omMismatch":{ - "lastKey":11, "containerDiscrepancyInfo": [ { "containerId": 1, @@ -3924,12 +3959,7 @@ } ], "existsAt": "SCM" - } - ] - }, - "omMismatch1":{ - "lastKey":21, - "containerDiscrepancyInfo": [ + }, { "containerId": 11, "numberOfKeys": 1, @@ -4162,12 +4192,7 @@ } ] }, - "omMismatch2":{ - "lastKey": null, - "containerDiscrepancyInfo": [] - }, "scmMismatch":{ - "lastKey":11, "containerDiscrepancyInfo": [ { "containerId": 1, @@ -4362,12 +4387,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch1":{ - "lastKey":21, - "containerDiscrepancyInfo": [ + }, { "containerId": 11, "numberOfKeys": 1, @@ -4561,12 +4581,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch2":{ - "lastKey":31, - "containerDiscrepancyInfo": [ + }, { "containerId": 21, "numberOfKeys": 1, @@ -4760,12 +4775,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch3":{ - "lastKey":41, - "containerDiscrepancyInfo": [ + }, { "containerId": 31, "numberOfKeys": 1, @@ -4959,12 +4969,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch4":{ - "lastKey":51, - "containerDiscrepancyInfo": [ + }, { "containerId": 41, "numberOfKeys": 1, @@ -5161,12 +5166,7 @@ } ] }, - "scmMismatch5":{ - "lastKey": null, - "containerDiscrepancyInfo": [] - }, "nonFSO": { - "lastKey": "11", "keysSummary": { "totalUnreplicatedDataSize": 10485760, "totalReplicatedDataSize": 31457280, @@ -5196,13 +5196,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "nonFSO1": { - "lastKey": 
"21", - "nonFSO": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191713", "path": "nonfso 11", @@ -5226,13 +5220,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "nonFSO2": { - "lastKey": "31", - "nonFSO": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191713", "path": "nonfso 21", @@ -5260,19 +5248,7 @@ ], "status": "OK" }, - "nonFSO3": { - "keysSummary": { - "totalUnreplicatedDataSize": 10485760, - "totalReplicatedDataSize": 31457280, - "totalOpenKeys": 10 - }, - "lastKey": "", - "replicatedDataSize": 0, - "unreplicatedDataSize": 0, - "status": "OK" - }, "fso": { - "lastKey": "11", "fso": [ { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2401/110569623850191713", @@ -5400,14 +5376,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - - } - ], - "status": "OK" - }, - "fso1": { - "lastKey": "21", - "fso": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191713", "path": "11", @@ -5515,13 +5484,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "fso2": { - "lastKey": "31", - "fso": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191713", "path": "21", @@ -5645,19 +5608,7 @@ ], "status": "OK" }, - "fso3": { - "keysSummary": { - "totalUnreplicatedDataSize": 10485760, - "totalReplicatedDataSize": 31457280, - "totalOpenKeys": 10 - }, - "lastKey": "", - "replicatedDataSize": 0, - "unreplicatedDataSize": 0, - "status": "OK" - }, "keydeletePending":{ - "lastKey": "/volume/bucket1/rcmeevblsf/106/-9223372036843950335", "keysSummary": { "totalUnreplicatedDataSize": 29291, "totalReplicatedDataSize": 87873, @@ -6015,16 +5966,7 @@ "updateIDset": true } ] - } - ], - "status": "OK" - }, - - "keydeletePending1": { - "lastKey":"/-9223372036854775552/-9223372036854775040/-9223372036852420095/2421/110569623850191", - "replicatedTotal": -1530804718628866300, - "unreplicatedTotal": -1530804718628866300, - "deletedKeyInfo": [ + }, { "omKeyInfoList": [ { @@ -6081,15 +6023,7 @@ "updateIDset": false } ] - } - ], - "status": "OK" - }, - "keydeletePending2": { - "lastKey":"31", - "replicatedTotal": -1530804718628866300, - "unreplicatedTotal": -1530804718628866300, - "deletedKeyInfo": [ + }, { "omKeyInfoList": [ { @@ -6205,12 +6139,7 @@ ], "status": "OK" }, - "keydeletePending3": { - "lastKey":"", - "deletedKeyInfo": [] - }, "deleted": { - "lastKey": "11", "containers": [ { "containerId": 1, @@ -6381,12 +6310,7 @@ "healthy": true } ] - } - ] - }, - "deleted1": { - "lastKey": "21", - "containers": [ + }, { "containerId": 11, "numberOfKeys": 2, @@ -6556,12 +6480,7 @@ "healthy": true } ] - } - ] - }, - "deleted2": { - "lastKey": "31", - "containers": [ + }, { "containerId": 21, "numberOfKeys": 2, @@ -6731,12 +6650,7 @@ "healthy": true } ] - } - ] - }, - "deleted3": { - "lastKey": "41", - "containers": [ + }, { "containerId": 31, "numberOfKeys": 2, @@ -6773,8 +6687,71 @@ } ] }, - "deleted4": { - "lastKey": null, - "containers": [] + "dirdeletePending": { + "replicatedDataSize": 0, + "unreplicatedDataSize": 0, + "deletedDirInfo": [ + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854764286/231010153900/-9223372036854760111", + "path": ".Trash/hadoop/231010153900", + "inStateSince": 1696952297266, + "size": 17289, + "replicatedSize": 100, + "replicationInfo": { + "replicationFactor": "THREE", + 
"requiredNodes": 3, + "replicationType": "RATIS" + } + }, + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854775552/dir3/-9223372036854774015", + "path": "dir1", + "inStateSince": 1696954980154, + "size": 1200, + "replicatedSize": 0, + "replicationInfo": { + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + } + }, + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854764286/231010153900/-9223372036854760191", + "path": ".Trash/hadoop/231010153900", + "inStateSince": 1696952297266, + "size": 17289, + "replicatedSize": 0, + "replicationInfo": { + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + } + }, + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854775552/dir3/-9223372036854774112", + "path": "dir21", + "inStateSince": 1696954980154, + "size": 17289, + "replicatedSize": 0, + "replicationInfo": { + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + } + }, + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854775552/dir3/-922337203685477303", + "path": "dir22", + "inStateSince": 1696954980900, + "size": 20289, + "replicatedSize": 0, + "replicationInfo": { + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + } + } + ], + "status": "OK" } } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json index 0bf0c69f5459..1e1f79d18754 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json @@ -37,39 +37,17 @@ "/keys/open/summary": "/keysOpenSummary", "/keys/deletePending/summary": "/keysdeletePendingSummary", - "/containers/mismatch?limit=*&prevKey=11&missingIn=OM" : "/omMismatch1", - "/containers/mismatch?limit=*&prevKey=21&missingIn=OM" : "/omMismatch2", - "/containers/mismatch?limit=*&prevKey=31&missingIn=OM" : "/omMismatch3", - "/containers/mismatch?limit=*&prevKey=41&missingIn=OM" : "/omMismatch4", + "/containers/mismatch?&missingIn=OM" : "/omMismatch", + "/containers/mismatch?limit=*&missingIn=OM" : "/omMismatch", - "/containers/mismatch?limit=*&prevKey=*&missingIn=OM" : "/omMismatch", + "/containers/mismatch?&missingIn=SCM" : "/scmMismatch", + "/containers/mismatch?limit=*&missingIn=SCM" : "/scmMismatch", - "/containers/mismatch?limit=*&prevKey=11&missingIn=SCM" : "/scmMismatch1", - "/containers/mismatch?limit=*&prevKey=21&missingIn=SCM" : "/scmMismatch2", - "/containers/mismatch?limit=*&prevKey=31&missingIn=SCM" : "/scmMismatch3", - "/containers/mismatch?limit=*&prevKey=41&missingIn=SCM" : "/scmMismatch4", - "/containers/mismatch?limit=*&prevKey=51&missingIn=SCM" : "/scmMismatch5", - - "/containers/mismatch?limit=*&prevKey=*&missingIn=SCM" : "/scmMismatch", - - "/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=11": "/nonFSO1", - "/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=21": "/nonFSO2", - "/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=31": "/nonFSO3", "/keys/open?includeFso=false&includeNonFso=true&limit=*": "/nonFSO", - - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=11": "/fso1", - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=21": "/fso2", - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=31": "/fso3", 
"/keys/open?includeFso=true&includeNonFso=false&limit=*": "/fso", - "/keys/deletePending?limit=*&prevKey=/volume/bucket1/rcmeevblsf/106/-9223372036843950335" : "/keydeletePending1", - "/keys/deletePending?limit=*&prevKey=/-9223372036854775552/-9223372036854775040/-9223372036852420095/2421/110569623850191" : "/keydeletePending2", - "/keys/deletePending?limit=*&prevKey=31" : "/keydeletePending3", "/keys/deletePending?limit=*" : "/keydeletePending", - "/containers/mismatch/deleted?limit=*&prevKey": "/deleted", - "/containers/mismatch/deleted?limit=*&prevKey=11": "/deleted1", - "/containers/mismatch/deleted?limit=*&prevKey=21": "/deleted2", - "/containers/mismatch/deleted?limit=*&prevKey=31": "/deleted3", - "/containers/mismatch/deleted?limit=*&prevKey=41": "/deleted4" + "/containers/mismatch/deleted?limit=*": "/deleted", + "/keys/deletePending/dirs?limit=*": "/dirdeletePending" } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index 3d1528fccb3e..41987c00ef35 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -16,7 +16,7 @@ "ag-charts-community": "^7.3.0", "ag-charts-react": "^7.3.0", "antd": "^3.26.20", - "axios": "^0.27.2", + "axios": "^0.28.0", "babel-jest": "^24.9.0", "babel-plugin-import": "^1.13.8", "classnames": "^2.3.2", @@ -25,7 +25,7 @@ "less": "^3.13.1", "less-loader": "^5.0.0", "moment": "^2.29.4", - "plotly.js": "^1.58.5", + "plotly.js": "^2.25.2", "pretty-ms": "^5.1.0", "react": "^16.8.6", "react-app-rewired": "^2.2.1", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index d54bc8663918..957a0ed5d152 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -17,7 +17,7 @@ specifiers: ag-charts-community: ^7.3.0 ag-charts-react: ^7.3.0 antd: ^3.26.20 - axios: ^0.27.2 + axios: ^0.28.0 babel-jest: ^24.9.0 babel-plugin-import: ^1.13.8 classnames: ^2.3.2 @@ -35,7 +35,7 @@ specifiers: less-loader: ^5.0.0 moment: ^2.29.4 npm-run-all: ^4.1.5 - plotly.js: ^1.58.5 + plotly.js: ^2.25.2 pretty-ms: ^5.1.0 react: ^16.8.6 react-app-rewired: ^2.2.1 @@ -61,7 +61,7 @@ dependencies: ag-charts-community: 7.3.0 ag-charts-react: 7.3.0_4uflhkpzmxcxyxkuqg2ofty3gq antd: 3.26.20_wcqkhtmu7mswc6yz4uyexck3ty - axios: 0.27.2 + axios: 0.28.0 babel-jest: 24.9.0_@babel+core@7.22.11 babel-plugin-import: 1.13.8 classnames: 2.3.2 @@ -70,12 +70,12 @@ dependencies: less: 3.13.1 less-loader: 5.0.0_less@3.13.1 moment: 2.29.4 - plotly.js: 1.58.5 + plotly.js: 2.25.2 pretty-ms: 5.1.0 react: 16.14.0 react-app-rewired: 2.2.1_react-scripts@3.4.4 react-dom: 16.14.0_react@16.14.0 - react-plotly.js: 2.6.0_f6dluzp62qf57yw3gl4ocsg3e4 + react-plotly.js: 2.6.0_qtjenpcawcnnxnr626ndcvhi4u react-router: 5.3.4_react@16.14.0 react-router-dom: 5.3.4_react@16.14.0 react-scripts: 3.4.4_bo7u2dcgnntwwyyxmecoaqdaee @@ -100,14 +100,6 @@ devDependencies: packages: - /3d-view/2.0.1: - resolution: {integrity: sha512-YSLRHXNpSziaaiK2R0pI5+JKguoJVbtWmIv9YyBFtl0+q42kQwJB/JUulbFR/1zYFm58ifjKQ6kVdgZ6tyKtCA==} - dependencies: - matrix-camera-controller: 2.1.4 - orbit-camera-controller: 4.0.0 - 
turntable-camera-controller: 3.0.1 - dev: false - /@ampproject/remapping/2.2.1: resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==} engines: {node: '>=6.0.0'} @@ -239,7 +231,7 @@ packages: gensync: 1.0.0-beta.2 json5: 2.2.3 lodash: 4.17.21 - resolve: 1.15.0 + resolve: 1.22.4 semver: 5.7.2 source-map: 0.5.7 transitivePeerDependencies: @@ -351,7 +343,7 @@ packages: '@babel/helper-plugin-utils': 7.22.5 debug: 4.3.4 lodash.debounce: 4.0.8 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - supports-color dev: false @@ -1842,7 +1834,7 @@ packages: '@babel/core': 7.9.0 '@babel/helper-module-imports': 7.22.5 '@babel/helper-plugin-utils': 7.22.5 - resolve: 1.15.0 + resolve: 1.22.4 semver: 5.7.2 dev: false @@ -2712,6 +2704,10 @@ packages: d3-shape: 1.3.7 dev: false + /@plotly/d3/3.8.1: + resolution: {integrity: sha512-x49ThEu1FRA00kTso4Jdfyf2byaCPLBGmLjAYQz5OzaPyLUhHesX3/Nfv2OHEhynhdy2UB39DLXq6thYe2L2kg==} + dev: false + /@plotly/point-cluster/3.1.9: resolution: {integrity: sha512-MwaI6g9scKf68Orpr1pHZ597pYx9uP8UEFXLPbsCmuw3a84obwz6pnMXGc90VhgDNeNiLEdlmuK7CPo+5PIxXw==} dependencies: @@ -2727,6 +2723,10 @@ packages: pick-by-alias: 1.2.0 dev: false + /@plotly/regl/2.1.2: + resolution: {integrity: sha512-Mdk+vUACbQvjd0m/1JJjOOafmkp/EpmHjISsopEz5Av44CBq7rPC05HHNbYGKVyNUF2zmEoBS/TT0pd0SPFFyw==} + dev: false + /@sinclair/typebox/0.27.8: resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} dev: false @@ -3290,14 +3290,6 @@ packages: resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==} dev: false - /a-big-triangle/1.0.3: - resolution: {integrity: sha512-AboEtoSPueZisde3Vr+7VRSfUIWBSGZUOtW3bJrOZXgIyK7dNNDdpDmOKJjg5GmJLlRKUONWV8lMgTK8MBhQWw==} - dependencies: - gl-buffer: 2.1.2 - gl-vao: 1.3.0 - weak-map: 1.0.8 - dev: false - /abab/2.0.6: resolution: {integrity: sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==} dev: false @@ -3361,12 +3353,6 @@ packages: object-assign: 4.1.1 dev: false - /add-line-numbers/1.0.1: - resolution: {integrity: sha512-w+2a1malCvWwACQFBpZ5/uwmHGaGYT+aGIxA8ONF5vlhe6X/gD3eR8qVoLWa+5nnWAOq2LuPbrqDYqj1pn0WMg==} - dependencies: - pad-left: 1.0.2 - dev: false - /address/1.1.2: resolution: {integrity: sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==} engines: {node: '>= 0.12.0'} @@ -3380,12 +3366,6 @@ packages: regex-parser: 2.2.11 dev: false - /affine-hull/1.0.0: - resolution: {integrity: sha512-3QNG6+vFAwJvSZHsJYDJ/mt1Cxx9n5ffA+1Ohmj7udw0JuRgUVIXK0P9N9pCMuEdS3jCNt8GFX5q2fChq+GO3Q==} - dependencies: - robust-orientation: 1.2.1 - dev: false - /ag-charts-community/7.3.0: resolution: {integrity: sha512-118U6YsCMia6iZHaN06zT19rr2SYa92WB73pMVCKQlp2H3c19uKQ6Y6DfKG/nIfNUzFXZLHBwKIdZXsMWJdZww==} dev: false @@ -3439,20 +3419,6 @@ packages: resolution: {integrity: sha512-0V/PkoculFl5+0Lp47JoxUcO0xSxhIBvm+BxHdD/OgXNmdRpRHCFnKVuUoWyS9EzQP+otSGv0m9Lb4yVkQBn2A==} dev: false - /alpha-complex/1.0.0: - resolution: {integrity: sha512-rhsjKfc9tMF5QZc0NhKz/zFzMu2rvHxCP/PyJtEmMkV7M848YjIoQGDlNGp+vTqxXjA8wAY2OxgR1K54C2Awkg==} - dependencies: - circumradius: 1.0.0 - delaunay-triangulate: 1.1.6 - dev: false - - /alpha-shape/1.0.0: - resolution: {integrity: sha512-/V+fmmjtSA2yfQNq8iEqBxnPbjcOMXpM9Ny+yE/O7aLR7Q1oPzUc9bHH0fPHS3hUugUL/dHzTis6l3JirYOS/w==} - dependencies: - alpha-complex: 1.0.0 - 
simplicial-complex-boundary: 1.0.1 - dev: false - /alphanum-sort/1.0.2: resolution: {integrity: sha512-0FcBfdcmaumGPQ0qPn7Q5qTgz/ooXgIyp1rf8ik5bGX8mpE2YHjC0P/eyQvxu1GURYQgq9ozf2mteQ5ZD9YiyQ==} dev: false @@ -3792,6 +3758,13 @@ packages: resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} dev: false + /asn1.js/4.10.1: + resolution: {integrity: sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==} + dependencies: + bn.js: 4.12.0 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + /asn1.js/5.4.1: resolution: {integrity: sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==} dependencies: @@ -3859,10 +3832,6 @@ packages: engines: {node: '>= 4.0.0'} dev: true - /atob-lite/1.0.0: - resolution: {integrity: sha512-ArXcmHR/vwSN37HLVap/Y5SKpz12CuEybxe1sIYl7th/S6SQPrVMNFt6rblJzCOAxn0SHbXpknUtqbAIeo3Aow==} - dev: false - /atob/2.1.2: resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} engines: {node: '>= 4.5.0'} @@ -3891,11 +3860,12 @@ packages: /aws4/1.12.0: resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} - /axios/0.27.2: - resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==} + /axios/0.28.0: + resolution: {integrity: sha512-Tu7NYoGY4Yoc7I+Npf9HhUMtEEpV7ZiLH9yndTCoNhcpBH0kwcvFbzYN9/u5QKI5A6uefjsNNWaz5olJVYS62Q==} dependencies: - follow-redirects: 1.15.2 + follow-redirects: 1.15.6 form-data: 4.0.0 + proxy-from-env: 1.1.0 transitivePeerDependencies: - debug dev: false @@ -3925,7 +3895,7 @@ packages: '@babel/types': 7.22.11 eslint: 6.8.0 eslint-visitor-keys: 1.3.0 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - supports-color dev: false @@ -4034,7 +4004,7 @@ packages: dependencies: '@babel/runtime': 7.9.0 cosmiconfig: 6.0.0 - resolve: 1.15.0 + resolve: 1.22.4 dev: false /babel-plugin-named-asset-import/0.3.8_@babel+core@7.9.0: @@ -4159,12 +4129,6 @@ packages: /balanced-match/1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} - /barycentric/1.0.1: - resolution: {integrity: sha512-47BuWXsenBbox4q1zqJrUoxq1oM1ysrYc5mdBACAwaP+CL+tcNauC3ybA0lzbIWzJCLZYMqebAx46EauTI2Nrg==} - dependencies: - robust-linear-solve: 1.0.0 - dev: false - /base/0.11.2: resolution: {integrity: sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==} engines: {node: '>=0.10.0'} @@ -4196,14 +4160,6 @@ packages: dependencies: tweetnacl: 0.14.5 - /big-rat/1.0.4: - resolution: {integrity: sha512-AubEohDDrak6urvKkFMIlwPWyQbJ/eq04YsK/SNipH7NNiPCYchjQNvWYK5vyyMmtGXAmNmsAjIcfkaDuTtd8g==} - dependencies: - bit-twiddle: 1.0.2 - bn.js: 4.12.0 - double-bits: 1.1.1 - dev: false - /big.js/5.2.2: resolution: {integrity: sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==} dev: false @@ -4224,15 +4180,12 @@ packages: /bindings/1.5.0: resolution: {integrity: sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==} + requiresBuild: true dependencies: file-uri-to-path: 1.0.0 dev: false optional: true - /bit-twiddle/0.0.2: - resolution: {integrity: sha512-76iFAOrkcuw5UPA30Pt32XaytMHXz/04JembgIwsQAp7ImHYSWNq1shBbrlWf6CUvh1+amQ81LI8hNhqQgsBEw==} - dev: false - /bit-twiddle/1.0.2: 
resolution: {integrity: sha512-B9UhK0DKFZhoTFcfvAzhqsjStvGJp9vYWf3+6SNTtdSQnvIgfkHbgHrg/e4+TH71N2GDu8tpmCVoyfrL1d7ntA==} dev: false @@ -4258,8 +4211,8 @@ packages: /bn.js/5.2.1: resolution: {integrity: sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==} - /body-parser/1.20.1: - resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} + /body-parser/1.20.2: + resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 @@ -4271,40 +4224,20 @@ packages: iconv-lite: 0.4.24 on-finished: 2.4.1 qs: 6.11.0 - raw-body: 2.5.1 + raw-body: 2.5.2 type-is: 1.6.18 unpipe: 1.0.0 transitivePeerDependencies: - supports-color dev: true - /body-parser/1.20.1_supports-color@6.1.0: - resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} - engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} - dependencies: - bytes: 3.1.2 - content-type: 1.0.5 - debug: 2.6.9_supports-color@6.1.0 - depd: 2.0.0 - destroy: 1.2.0 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - on-finished: 2.4.1 - qs: 6.11.0 - raw-body: 2.5.1 - type-is: 1.6.18 - unpipe: 1.0.0 - transitivePeerDependencies: - - supports-color - dev: false - - /body-parser/1.20.2: + /body-parser/1.20.2_supports-color@6.1.0: resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 content-type: 1.0.5 - debug: 2.6.9 + debug: 2.6.9_supports-color@6.1.0 depd: 2.0.0 destroy: 1.2.0 http-errors: 2.0.0 @@ -4316,7 +4249,7 @@ packages: unpipe: 1.0.0 transitivePeerDependencies: - supports-color - dev: true + dev: false /bonjour/3.5.0: resolution: {integrity: sha512-RaVTblr+OnEli0r/ud8InrU7D+G0y6aJhlxaLa6Pwty4+xoxboF1BsUI45tujvRpbj9dQVoglChqonGAsjEBYg==} @@ -4333,17 +4266,6 @@ packages: resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} dev: false - /boundary-cells/2.0.2: - resolution: {integrity: sha512-/S48oUFYEgZMNvdqC87iYRbLBAPHYijPRNrNpm/sS8u7ijIViKm/hrV3YD4sx/W68AsG5zLMyBEditVHApHU5w==} - dev: false - - /box-intersect/1.0.2: - resolution: {integrity: sha512-yJeMwlmFPG1gIa7Rs/cGXeI6iOj6Qz5MG5PE61xLKpElUGzmJ4abm+qsLpzxKJFpsSDq742BQEocr8dI2t8Nxw==} - dependencies: - bit-twiddle: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - /boxen/3.2.0: resolution: {integrity: sha512-cU4J/+NodM3IHdSL2yN8bqYqnmlBTidDR4RC7nJs61ZmtGz8VZzM3HLQX0zY5mrSmPtR3xWwsq2jOUQqFZN8+A==} engines: {node: '>=6'} @@ -4463,17 +4385,19 @@ packages: bn.js: 5.2.1 randombytes: 2.1.0 - /browserify-sign/4.2.1: - resolution: {integrity: sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==} + /browserify-sign/4.2.3: + resolution: {integrity: sha512-JWCZW6SKhfhjJxO8Tyiiy+XYB7cqd2S5/+WeYHsKdNKFlCBhKbblba1A/HN/90YwtxKc8tCErjffZl++UNmGiw==} + engines: {node: '>= 0.12'} dependencies: bn.js: 5.2.1 browserify-rsa: 4.1.0 create-hash: 1.2.0 create-hmac: 1.1.7 - elliptic: 6.5.4 + elliptic: 6.5.5 + hash-base: 3.0.4 inherits: 2.0.4 - parse-asn1: 5.1.6 - readable-stream: 3.6.2 + parse-asn1: 5.1.7 + readable-stream: 2.3.8 safe-buffer: 5.2.1 /browserify-zlib/0.2.0: @@ -4706,18 +4630,6 @@ packages: /caseless/0.12.0: resolution: {integrity: 
sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} - /cdt2d/1.0.0: - resolution: {integrity: sha512-pFKb7gVhpsI6onS5HUXRoqbBIJB4CJ+KPk8kgaIVcm0zFgOxIyBT5vzifZ4j1aoGVJS0U1A+S4oFDshuLAitlA==} - dependencies: - binary-search-bounds: 2.0.5 - robust-in-sphere: 1.2.1 - robust-orientation: 1.2.1 - dev: false - - /cell-orientation/1.0.1: - resolution: {integrity: sha512-DtEsrgP+donmPxpEZm7hK8zCPYDXAQ977ecJiE7G0gbTfnS6TZVBlief3IdRP/TZS1PVnJRGJTDdjSdV8mRDug==} - dev: false - /chalk/1.1.3: resolution: {integrity: sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==} engines: {node: '>=0.10.0'} @@ -4831,19 +4743,6 @@ packages: inherits: 2.0.4 safe-buffer: 5.2.1 - /circumcenter/1.0.0: - resolution: {integrity: sha512-YRw0mvttcISviaOtSmaHb2G3ZVbkxzYPQeAEd57/CFFtmOkwfRTw9XuxYZ7PCi2BYa0NajjHV6bq4nbY1VCC8g==} - dependencies: - dup: 1.0.0 - robust-linear-solve: 1.0.0 - dev: false - - /circumradius/1.0.0: - resolution: {integrity: sha512-5ltoQvWQzJiZjCVX9PBKgKt+nsuzOLKayqXMNllfRSqIp2L5jFpdanv1V6j27Ue7ACxlzmamlR+jnLy+NTTVTw==} - dependencies: - circumcenter: 1.0.0 - dev: false - /clamp/1.0.1: resolution: {integrity: sha512-kgMuFyE78OC6Dyu3Dy7vcx4uy97EIbVxJB/B0eJ3bUNAkwdNcxYzgKltnyADiYwsR7SEqkkUPsEUT//OVS6XMA==} dev: false @@ -4872,18 +4771,6 @@ packages: source-map: 0.6.1 dev: false - /clean-pslg/1.1.2: - resolution: {integrity: sha512-bJnEUR6gRiiNi2n4WSC6yrc0Hhn/oQDOTzs6evZfPwEF/VKVXM6xu0F4n/WSBz7TjTt/ZK6I5snRM9gVKMVAxA==} - dependencies: - big-rat: 1.0.4 - box-intersect: 1.0.2 - nextafter: 1.0.0 - rat-vec: 1.1.1 - robust-segment-intersect: 1.0.1 - union-find: 1.0.2 - uniq: 1.0.1 - dev: false - /clean-regexp/1.0.0: resolution: {integrity: sha512-GfisEZEJvzKrmGWkvfhgzcz/BllN1USeqD2V6tg14OAOgaCD2Z/PUEuxnAZ/nPvmaHRG7a8y77p1T/IRQ4D1Hw==} engines: {node: '>=4'} @@ -5048,12 +4935,6 @@ packages: color-string: 1.9.1 dev: false - /colormap/2.3.2: - resolution: {integrity: sha512-jDOjaoEEmA9AgA11B/jCSAvYE95r3wRoAyTf3LEHGiUVlNHJaL1mRkf5AyLSpQBVGfTEPwGEqCIzL+kgr2WgNA==} - dependencies: - lerp: 1.0.3 - dev: false - /combined-stream/1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -5077,27 +4958,6 @@ packages: /commondir/1.0.1: resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==} - /compare-angle/1.0.1: - resolution: {integrity: sha512-adM1/bpLFQFquh0/Qr5aiOPuztoga/lCf2Z45s+Oydgzf18F3wBSkdHmcHMeig0bD+dDKlz52u1rLOAOqiyE5A==} - dependencies: - robust-orientation: 1.2.1 - robust-product: 1.0.0 - robust-sum: 1.0.0 - signum: 0.0.0 - two-sum: 1.0.0 - dev: false - - /compare-cell/1.0.0: - resolution: {integrity: sha512-uNIkjiNLZLhdCgouF39J+W04R7oP1vwrNME4vP2b2/bAa6PHOj+h8yXu52uPjPTKs5RatvqNsDVwEN7Yp19vNA==} - dev: false - - /compare-oriented-cell/1.0.1: - resolution: {integrity: sha512-9D7R2MQfsGGRskZAZF0TkJHt9eFNbFkZyVdVps+WUYxtRHgG77BLbieKgSkj7iEAb9PNDSU9QNa9MtigjQ3ktQ==} - dependencies: - cell-orientation: 1.0.1 - compare-cell: 1.0.0 - dev: false - /component-classes/1.2.6: resolution: {integrity: sha512-hPFGULxdwugu1QWW3SvVOCUHLzO34+a2J6Wqy0c5ASQkfi9/8nZcBB0ZohaEbXOQlCflMAEMmEWk7u7BVs4koA==} dependencies: @@ -5242,19 +5102,11 @@ packages: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} dev: false - /convex-hull/1.0.3: - resolution: {integrity: 
sha512-24rZAoh81t41GHPLAxcsokgjH9XNoVqU2OiSi8iMHUn6HUURfiefcEWAPt1AfwZjBBWTKadOm1xUcUMnfFukhQ==} - dependencies: - affine-hull: 1.0.0 - incremental-convex-hull: 1.0.1 - monotone-convex-hull-2d: 1.0.1 - dev: false - /cookie-signature/1.0.6: resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} - /cookie/0.5.0: - resolution: {integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==} + /cookie/0.6.0: + resolution: {integrity: sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==} engines: {node: '>= 0.6'} /copy-anything/2.0.6: @@ -5420,7 +5272,7 @@ packages: resolution: {integrity: sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==} dependencies: browserify-cipher: 1.0.1 - browserify-sign: 4.2.1 + browserify-sign: 4.2.3 create-ecdh: 4.0.4 create-hash: 1.2.0 create-hmac: 1.1.7 @@ -5714,10 +5566,6 @@ packages: resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==} dev: false - /cubic-hermite/1.0.0: - resolution: {integrity: sha512-DKZ6yLcJiJJgl54mGA4n0uueYB4qdPfOJrQ1HSEZqdKp6D25AAAWVDwpoAxLflOku5a/ALBO77oEIyWcVa+UYg==} - dev: false - /currently-unhandled/0.4.1: resolution: {integrity: sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==} engines: {node: '>=0.10.0'} @@ -5731,12 +5579,6 @@ packages: lodash.flow: 3.5.0 dev: false - /cwise-compiler/1.1.3: - resolution: {integrity: sha512-WXlK/m+Di8DMMcCjcWr4i+XzcQra9eCdXIJrgh4TUgh0pIS/yJduLxS9JgefsHJ/YVLdgPtXm9r62W92MvanEQ==} - dependencies: - uniq: 1.0.1 - dev: false - /cyclist/1.0.2: resolution: {integrity: sha512-0sVXIohTfLqVIW3kb/0n6IiWF3Ifj5nm2XaSrLq2DI6fKIGa2fYAZdk917rUneaeLVpYfFcyXE2ft0fe3remsA==} dev: false @@ -5744,7 +5586,7 @@ packages: /d/1.0.1: resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==} dependencies: - es5-ext: 0.10.62 + es5-ext: 0.10.64 type: 1.2.0 dev: false @@ -5773,12 +5615,33 @@ packages: d3-timer: 1.0.10 dev: false + /d3-format/1.4.5: + resolution: {integrity: sha512-J0piedu6Z8iB6TbIGfZgDzfXxUFN3qQRMofy2oPdXzQibYGqPB/9iMcxr/TGalU+2RsyDO+U4f33id8tbnSRMQ==} + dev: false + + /d3-geo-projection/2.9.0: + resolution: {integrity: sha512-ZULvK/zBn87of5rWAfFMc9mJOipeSo57O+BBitsKIXmU4rTVAnX1kSsJkE0R+TxY8pGNoM1nbyRRE7GYHhdOEQ==} + hasBin: true + dependencies: + commander: 2.20.3 + d3-array: 1.2.4 + d3-geo: 1.12.1 + resolve: 1.22.4 + dev: false + + /d3-geo/1.12.1: + resolution: {integrity: sha512-XG4d1c/UJSEX9NfU02KwBL6BYPj8YKHxgBEw5om2ZnTRSbIcego6dhHwcxuSR3clxh0EpE38os1DVPOmnYtTPg==} + dependencies: + d3-array: 1.2.4 + dev: false + /d3-hierarchy/1.1.9: resolution: {integrity: sha512-j8tPxlqh1srJHAtxfvOUwKNYJkQuBFdM1+JAUfq6xqH5eAqf93L7oG1NVqDa4CpFZNvnNKtCYEUC8KY9yEn9lQ==} dev: false - /d3-interpolate/1.4.0: - resolution: {integrity: sha512-V9znK0zc3jOPV4VD2zZn0sDhZU3WAE2bmlxdIwwQPPzPjvyLkd8B3JUVdS1IDUFDkWZ72c9qnv1GK2ZagTZ8EA==} + /d3-interpolate/3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} dependencies: d3-color: 1.4.1 dev: false @@ -5811,10 +5674,6 @@ packages: resolution: {integrity: sha512-B1JDm0XDaQC+uvo4DT79H0XmBskgS3l6Ve+1SBCfxgmtIb1AVrPIoqd+nPSv+loMX8szQ0sVUhGngL7D5QPiXw==} dev: false - /d3/3.5.17: - resolution: {integrity: 
sha512-yFk/2idb8OHPKkbAL8QaOaqENNoMhIaSHZerk3oQsECwkObkCpJyjYwCe+OHiq6UEdhe1m8ZGARRRO3ljFjlKg==} - dev: false - /damerau-levenshtein/1.0.8: resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} dev: false @@ -6016,13 +5875,6 @@ packages: rimraf: 2.7.1 dev: false - /delaunay-triangulate/1.1.6: - resolution: {integrity: sha512-mhAclqFCgLoiBIDQDIz2K+puZq6OhYxunXrG2wtTcZS+S1xuzl+H3h0MIOajpES+Z+jfY/rz0wVt3o5iipt1wg==} - dependencies: - incremental-convex-hull: 1.0.1 - uniq: 1.0.1 - dev: false - /delayed-stream/1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} @@ -6109,7 +5961,7 @@ packages: /dns-packet/1.3.4: resolution: {integrity: sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA==} dependencies: - ip: 1.1.8 + ip: 1.1.9 safe-buffer: 5.2.1 dev: false @@ -6254,10 +6106,6 @@ packages: engines: {node: '>=8'} dev: false - /double-bits/1.1.1: - resolution: {integrity: sha512-BCLEIBq0O/DWoA7BsCu/R+RP0ZXiowP8BhtJT3qeuuQEBpnS8LK/Wo6UTJQv6v8mK1fj8n90YziHLwGdM5whSg==} - dev: false - /draft-js/0.10.5_wcqkhtmu7mswc6yz4uyexck3ty: resolution: {integrity: sha512-LE6jSCV9nkPhfVX2ggcRLA4FKs6zWq9ceuO/88BpXdNCS7mjRTgs0NsV6piUCJX9YxMsB9An33wnkMmU2sD2Zg==} peerDependencies: @@ -6314,12 +6162,6 @@ packages: jsbn: 0.1.1 safer-buffer: 2.1.2 - /edges-to-adjacency-list/1.0.0: - resolution: {integrity: sha512-0n0Z+xTLfg96eYXm91PEY4rO4WGxohLWjJ9qD1RI3fzxKU6GHez+6KPajpobR4zeZxp7rSiHjHG5dZPj8Kj58Q==} - dependencies: - uniq: 1.0.1 - dev: false - /ee-first/1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} @@ -6348,6 +6190,17 @@ packages: minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 + /elliptic/6.5.5: + resolution: {integrity: sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==} + dependencies: + bn.js: 4.12.0 + brorand: 1.1.0 + hash.js: 1.1.7 + hmac-drbg: 1.0.1 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + minimalistic-crypto-utils: 1.0.1 + /emoji-regex/7.0.3: resolution: {integrity: sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==} @@ -6525,13 +6378,14 @@ packages: is-date-object: 1.0.5 is-symbol: 1.0.4 - /es5-ext/0.10.62: - resolution: {integrity: sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==} + /es5-ext/0.10.64: + resolution: {integrity: sha512-p2snDhiLaXe6dahss1LddxqEm+SkuDvV8dnIQG0MWjyHpcMNfXKPE+/Cc0y+PhxJX3A4xGNeFCj5oc0BUh6deg==} engines: {node: '>=0.10'} requiresBuild: true dependencies: es6-iterator: 2.0.3 es6-symbol: 3.1.3 + esniff: 2.0.1 next-tick: 1.1.0 dev: false @@ -6539,14 +6393,10 @@ packages: resolution: {integrity: sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==} dependencies: d: 1.0.1 - es5-ext: 0.10.62 + es5-ext: 0.10.64 es6-symbol: 3.1.3 dev: false - /es6-promise/4.2.8: - resolution: {integrity: sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==} - dev: false - /es6-symbol/3.1.3: resolution: {integrity: sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==} dependencies: @@ -6558,7 +6408,7 @@ packages: resolution: {integrity: sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==} 
dependencies: d: 1.0.1 - es5-ext: 0.10.62 + es5-ext: 0.10.64 es6-iterator: 2.0.3 es6-symbol: 3.1.3 dev: false @@ -6871,7 +6721,7 @@ packages: minimatch: 3.1.2 object.values: 1.1.6 read-pkg-up: 2.0.0 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -7180,6 +7030,16 @@ packages: transitivePeerDependencies: - supports-color + /esniff/2.0.1: + resolution: {integrity: sha512-kTUIGKQ/mDPFoJ0oVfcmyJn4iBDRptjNVIzwIFR7tqWXdVI9xfA2RMwY/gbSpJG3lkdWNEjLap/NqVHZiJsdfg==} + engines: {node: '>=0.10'} + dependencies: + d: 1.0.1 + es5-ext: 0.10.64 + event-emitter: 0.3.5 + type: 2.7.2 + dev: false + /espree/6.2.1: resolution: {integrity: sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==} engines: {node: '>=6.0.0'} @@ -7234,6 +7094,13 @@ packages: resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} engines: {node: '>= 0.6'} + /event-emitter/0.3.5: + resolution: {integrity: sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.64 + dev: false + /eventemitter3/4.0.7: resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} dev: false @@ -7344,16 +7211,16 @@ packages: - supports-color dev: true - /express/4.18.2: - resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + /express/4.19.2: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.1 + body-parser: 1.20.2 content-disposition: 0.5.4 content-type: 1.0.5 - cookie: 0.5.0 + cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9 depd: 2.0.0 @@ -7383,16 +7250,16 @@ packages: - supports-color dev: true - /express/4.18.2_supports-color@6.1.0: - resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + /express/4.19.2_supports-color@6.1.0: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.1_supports-color@6.1.0 + body-parser: 1.20.2_supports-color@6.1.0 content-disposition: 0.5.4 content-type: 1.0.5 - cookie: 0.5.0 + cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9_supports-color@6.1.0 depd: 2.0.0 @@ -7483,10 +7350,6 @@ packages: - supports-color dev: false - /extract-frustum-planes/1.0.0: - resolution: {integrity: sha512-GivvxEMgjSNnB3e1mIMBlB5ogPB6XyEjOQRGG0SfYVVLtu1ntLGHLT1ly8+mE819dKBHBwnm9+UBCScjiMgppA==} - dev: false - /extsprintf/1.3.0: resolution: {integrity: sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==} engines: {'0': node >=0.6.0} @@ -7592,6 +7455,7 @@ packages: /file-uri-to-path/1.0.0: resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==} + requiresBuild: true dev: false optional: true @@ -7620,13 +7484,6 @@ packages: dependencies: to-regex-range: 5.0.1 - /filtered-vector/1.2.5: - resolution: {integrity: sha512-5Vu6wdtQJ1O2nRmz39dIr9m3hEDq1skYby5k1cJQdNWK4dMgvYcUEiA/9j7NcKfNZ5LGxn8w2LSLiigyH7pTAw==} - dependencies: - 
binary-search-bounds: 2.0.5 - cubic-hermite: 1.0.0 - dev: false - /finalhandler/1.2.0: resolution: {integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==} engines: {node: '>= 0.8'} @@ -7742,8 +7599,8 @@ packages: readable-stream: 2.3.8 dev: false - /follow-redirects/1.15.2: - resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==} + /follow-redirects/1.15.6: + resolution: {integrity: sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==} engines: {node: '>=4.0'} peerDependencies: debug: '*' @@ -7752,8 +7609,8 @@ packages: optional: true dev: false - /follow-redirects/1.15.2_debug@4.3.4: - resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==} + /follow-redirects/1.15.6_debug@4.3.4: + resolution: {integrity: sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==} engines: {node: '>=4.0'} peerDependencies: debug: '*' @@ -7969,10 +7826,6 @@ packages: /functions-have-names/1.2.3: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} - /gamma/0.1.0: - resolution: {integrity: sha512-IgHc/jnzNTA2KjXmRSx/CVd1ONp7HTAV81SLI+n3G6PyyHkakkE+2d3hteJYFm7aoe01NEl4m7ziUAsoWCc5AA==} - dev: false - /gensync/1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -8056,120 +7909,6 @@ packages: dependencies: assert-plus: 1.0.0 - /gl-axes3d/1.5.3: - resolution: {integrity: sha512-KRYbguKQcDQ6PcB9g1pgqB8Ly4TY1DQODpPKiDTasyWJ8PxQk0t2Q7XoQQijNqvsguITCpVVCzNb5GVtIWiVlQ==} - dependencies: - bit-twiddle: 1.0.2 - dup: 1.0.0 - extract-frustum-planes: 1.0.0 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-state: 1.0.0 - gl-vao: 1.3.0 - gl-vec4: 1.0.1 - glslify: 7.1.1 - robust-orientation: 1.2.1 - split-polygon: 1.0.0 - vectorize-text: 3.2.2 - dev: false - - /gl-buffer/2.1.2: - resolution: {integrity: sha512-uVvLxxhEbQGl43xtDeKu75ApnrGyNHoPmOcvvuJNyP04HkK0/sX5Dll6OFffQiwSV4j0nlAZsgznvO3CPT3dFg==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - - /gl-cone3d/1.5.2: - resolution: {integrity: sha512-1JNeHH4sUtUmDA4ZK7Om8/kShwb8IZVAsnxaaB7IPRJsNGciLj1sTpODrJGeMl41RNkex5kXD2SQFrzyEAR2Rw==} - dependencies: - colormap: 2.3.2 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - gl-vec3: 1.1.3 - glsl-inverse: 1.0.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - ndarray: 1.0.19 - dev: false - - /gl-constants/1.0.0: - resolution: {integrity: sha512-3DNyoAUdb1c+o7jNk5Nm7eh6RSQFi9ZmMQIQb2xxsO27rUopE+IUhoh4xlUvZYBn1YPgUC8BlCnrVjXq/d2dQA==} - dev: false - - /gl-contour2d/1.1.7: - resolution: {integrity: sha512-GdebvJ9DtT3pJDpoE+eU2q+Wo9S3MijPpPz5arZbhK85w2bARmpFpVfPaDlZqWkB644W3BlH8TVyvAo1KE4Bhw==} - dependencies: - binary-search-bounds: 2.0.5 - cdt2d: 1.0.0 - clean-pslg: 1.1.2 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - iota-array: 1.0.0 - ndarray: 1.0.19 - surface-nets: 1.0.2 - dev: false - - /gl-error3d/1.0.16: - resolution: {integrity: sha512-TGJewnKSp7ZnqGgG3XCF9ldrDbxZrO+OWlx6oIet4OdOM//n8xJ5isArnIV/sdPJnFbhfoLxWrW9f5fxHFRQ1A==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - dev: 
false - - /gl-fbo/2.0.5: - resolution: {integrity: sha512-tDq6zQSQzvvK2QwPV7ln7cf3rs0jV1rQXqKOEuB145LdN+xhADPBtXHDJ3Ftk80RAJimJU0AaQBgP/X6yYGNhQ==} - dependencies: - gl-texture2d: 2.1.0 - dev: false - - /gl-format-compiler-error/1.0.3: - resolution: {integrity: sha512-FtQaBYlsM/rnz7YhLkxG9dLcNDB+ExErIsFV2DXl0nk+YgIZ2i0jMob4BrhT9dNa179zFb0gZMWpNAokytK+Ug==} - dependencies: - add-line-numbers: 1.0.1 - gl-constants: 1.0.0 - glsl-shader-name: 1.0.0 - sprintf-js: 1.1.2 - dev: false - - /gl-heatmap2d/1.1.1: - resolution: {integrity: sha512-6Vo1fPIB1vQFWBA/MR6JAA16XuQuhwvZRbSjYEq++m4QV33iqjGS2HcVIRfJGX+fomd5eiz6bwkVZcKm69zQPw==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - iota-array: 1.0.0 - typedarray-pool: 1.2.0 - dev: false - - /gl-line3d/1.2.1: - resolution: {integrity: sha512-eeb0+RI2ZBRqMYJK85SgsRiJK7c4aiOjcnirxv0830A3jmOc99snY3AbPcV8KvKmW0Yaf3KA4e+qNCbHiTOTnA==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - ndarray: 1.0.19 - dev: false - - /gl-mat3/1.0.0: - resolution: {integrity: sha512-obeEq9y7xaDoVkwMGJNL1upwpYlPJiXJFhREaNytMqUdfHKHNna9HvImmLV8F8Ys6QOYwPPddptZNoiiec/XOg==} - dev: false - /gl-mat4/1.2.0: resolution: {integrity: sha512-sT5C0pwB1/e9G9AvAoLsoaJtbMGjfd/jfxo8jMCKqYYEnjZuFvqV5rehqar0538EmssjdDeiEWnKyBSTw7quoA==} dev: false @@ -8178,169 +7917,6 @@ packages: resolution: {integrity: sha512-wcCp8vu8FT22BnvKVPjXa/ICBWRq/zjFfdofZy1WSpQZpphblv12/bOQLBC1rMM7SGOFS9ltVmKOHil5+Ml7gA==} dev: false - /gl-mesh3d/2.3.1: - resolution: {integrity: sha512-pXECamyGgu4/9HeAQSE5OEUuLBGS1aq9V4BCsTcxsND4fNLaajEkYKUz/WY2QSYElqKdsMBVsldGiKRKwlybqA==} - dependencies: - barycentric: 1.0.1 - colormap: 2.3.2 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - ndarray: 1.0.19 - normals: 1.1.0 - polytope-closest-point: 1.0.0 - simplicial-complex-contour: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - - /gl-plot2d/1.4.5: - resolution: {integrity: sha512-6GmCN10SWtV+qHFQ1gjdnVubeHFVsm6P4zmo0HrPIl9TcdePCUHDlBKWAuE6XtFhiMKMj7R8rApOX8O8uXUYog==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-select-static: 2.0.7 - gl-shader: 4.3.1 - glsl-inverse: 1.0.0 - glslify: 7.1.1 - text-cache: 4.2.2 - dev: false - - /gl-plot3d/2.4.7: - resolution: {integrity: sha512-mLDVWrl4Dj0O0druWyHUK5l7cBQrRIJRn2oROEgrRuOgbbrLAzsREKefwMO0bA0YqkiZMFMnV5VvPA9j57X5Xg==} - dependencies: - 3d-view: 2.0.1 - a-big-triangle: 1.0.3 - gl-axes3d: 1.5.3 - gl-fbo: 2.0.5 - gl-mat4: 1.2.0 - gl-select-static: 2.0.7 - gl-shader: 4.3.1 - gl-spikes3d: 1.0.10 - glslify: 7.1.1 - has-passive-events: 1.0.0 - is-mobile: 2.2.2 - mouse-change: 1.4.0 - mouse-event-offset: 3.0.2 - mouse-wheel: 1.2.0 - ndarray: 1.0.19 - right-now: 1.0.0 - dev: false - - /gl-pointcloud2d/1.0.3: - resolution: {integrity: sha512-OS2e1irvJXVRpg/GziXj10xrFJm9kkRfFoB6BLUvkjCQV7ZRNNcs2CD+YSK1r0gvMwTg2T3lfLM3UPwNtz+4Xw==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - typedarray-pool: 1.2.0 - dev: false - - /gl-quat/1.0.0: - resolution: {integrity: sha512-Pv9yvjJgQN85EbE79S+DF50ujxDkyjfYHIyXJcCRiimU1UxMY7vEHbVkj0IWLFaDndhfZT9vVOyfdMobLlrJsQ==} - dependencies: - gl-mat3: 1.0.0 - gl-vec3: 1.1.3 - gl-vec4: 1.0.1 - dev: false - - /gl-scatter3d/1.2.3: - resolution: {integrity: 
sha512-nXqPlT1w5Qt51dTksj+DUqrZqwWAEWg0PocsKcoDnVNv0X8sGA+LBZ0Y+zrA+KNXUL0PPCX9WR9cF2uJAZl1Sw==} - dependencies: - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - is-string-blank: 1.0.1 - typedarray-pool: 1.2.0 - vectorize-text: 3.2.2 - dev: false - - /gl-select-box/1.0.4: - resolution: {integrity: sha512-mKsCnglraSKyBbQiGq0Ila0WF+m6Tr+EWT2yfaMn/Sh9aMHq5Wt0F/l6Cf/Ed3CdERq5jHWAY5yxLviZteYu2w==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - dev: false - - /gl-select-static/2.0.7: - resolution: {integrity: sha512-OvpYprd+ngl3liEatBTdXhSyNBjwvjMSvV2rN0KHpTU+BTi4viEETXNZXFgGXY37qARs0L28ybk3UQEW6C5Nnw==} - dependencies: - bit-twiddle: 1.0.2 - gl-fbo: 2.0.5 - ndarray: 1.0.19 - typedarray-pool: 1.2.0 - dev: false - - /gl-shader/4.3.1: - resolution: {integrity: sha512-xLoN6XtRLlg97SEqtuzfKc+pVWpVkQ3YjDI1kuCale8tF7+zMhiKlMfmG4IMQPMdKJZQbIc/Ny8ZusEpfh5U+w==} - dependencies: - gl-format-compiler-error: 1.0.3 - weakmap-shim: 1.1.1 - dev: false - - /gl-spikes2d/1.0.2: - resolution: {integrity: sha512-QVeOZsi9nQuJJl7NB3132CCv5KA10BWxAY2QgJNsKqbLsG53B/TrGJpjIAohnJftdZ4fT6b3ZojWgeaXk8bOOA==} - dev: false - - /gl-spikes3d/1.0.10: - resolution: {integrity: sha512-lT3xroowOFxMvlhT5Mof76B2TE02l5zt/NIWljhczV2FFHgIVhA4jMrd5dIv1so1RXMBDJIKu0uJI3QKliDVLg==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glslify: 7.1.1 - dev: false - - /gl-state/1.0.0: - resolution: {integrity: sha512-Od836PpgCuTC0W7uHYnEEPRdQPL1FakWlznz3hRvlO6tD5sdLfBKX9qNRGy1DjfMCDTudhyYWxiWjhql1B8N4Q==} - dependencies: - uniq: 1.0.1 - dev: false - - /gl-streamtube3d/1.4.1: - resolution: {integrity: sha512-rH02v00kgwgdpkXVo7KsSoPp38bIAYR9TE1iONjcQ4cQAlDhrGRauqT/P5sUaOIzs17A2DxWGcXM+EpNQs9pUA==} - dependencies: - gl-cone3d: 1.5.2 - gl-vec3: 1.1.3 - gl-vec4: 1.0.1 - glsl-inverse: 1.0.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - dev: false - - /gl-surface3d/1.6.0: - resolution: {integrity: sha512-x15+u4712ysnB85G55RLJEml6mOB4VaDn0VTlXCc9JcjRl5Es10Tk7lhGGyiPtkCfHwvhnkxzYA1/rHHYN7Y0A==} - dependencies: - binary-search-bounds: 2.0.5 - bit-twiddle: 1.0.2 - colormap: 2.3.2 - dup: 1.0.0 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glsl-specular-beckmann: 1.1.2 - glslify: 7.1.1 - ndarray: 1.0.19 - ndarray-gradient: 1.0.1 - ndarray-ops: 1.2.2 - ndarray-pack: 1.2.1 - ndarray-scratch: 1.2.0 - surface-nets: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - /gl-text/1.3.1: resolution: {integrity: sha512-/f5gcEMiZd+UTBJLTl3D+CkCB/0UFGTx3nflH8ZmyWcLkZhsZ1+Xx5YYkw2rgWAzgPeE35xCqBuHSoMKQVsR+w==} dependencies: @@ -8363,14 +7939,6 @@ packages: typedarray-pool: 1.2.0 dev: false - /gl-texture2d/2.1.0: - resolution: {integrity: sha512-W0tzEjtlGSsCKq5FFwFVhH+fONFUTUeqM4HhA/BleygKaX39IwNTVOiqkwfu9szQZ4dQEq8ZDl7w1ud/eKLaZA==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - /gl-util/3.1.3: resolution: {integrity: sha512-dvRTggw5MSkJnCbh74jZzSoTOGnVYK+Bt+Ckqm39CVcl6+zSsxqWk4lr5NKhkqXHL6qvZAU9h17ZF8mIskY9mA==} dependencies: @@ -8383,18 +7951,6 @@ packages: weak-map: 1.0.8 dev: false - /gl-vao/1.3.0: - resolution: {integrity: sha512-stSOZ+n0fnAxgDfipwKK/73AwzCNL+AFEc/v2Xm76nyFnUZGmQtD2FEC3lt1icoOHAzMgHBAjCue7dBIDeOTcw==} - dev: false - - /gl-vec3/1.1.3: - resolution: {integrity: sha512-jduKUqT0SGH02l8Yl+mV1yVsDfYgQAJyXGxkJQGyxPLHRiW25DwVIRPt6uvhrEMHftJfqhqKthRcyZqNEl9Xdw==} - dev: false - - 
/gl-vec4/1.0.1: - resolution: {integrity: sha512-/gx5zzIy75JXzke4yuwcbvK+COWf8UJbVCUPvhfsYVw1GVey4Eextk/0H0ctXnOICruNK7+GS4ILQzEQcHcPEg==} - dev: false - /glob-parent/3.1.0: resolution: {integrity: sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==} dependencies: @@ -8517,14 +8073,6 @@ packages: glsl-tokenizer: 2.1.5 dev: false - /glsl-inverse/1.0.0: - resolution: {integrity: sha512-+BsseNlgqzd4IFX1dMqg+S0XuIXzH0acvTtW7svwhJESM1jb2BZFwdO+tOWdCXD5Zse6b9bOmzp5sCNA7GQ2QA==} - dev: false - - /glsl-out-of-range/1.0.4: - resolution: {integrity: sha512-fCcDu2LCQ39VBvfe1FbhuazXEf0CqMZI9OYXrYlL6uUARG48CTAbL04+tZBtVM0zo1Ljx4OLu2AxNquq++lxWQ==} - dev: false - /glsl-resolve/0.0.1: resolution: {integrity: sha512-xxFNsfnhZTK9NBhzJjSBGX6IOqYpvBHxxmo+4vapiljyGNCY0Bekzn0firQkQrazK59c1hYxMDxYS8MDlhw4gA==} dependencies: @@ -8532,23 +8080,6 @@ packages: xtend: 2.2.0 dev: false - /glsl-shader-name/1.0.0: - resolution: {integrity: sha512-OtHon0dPCbJD+IrVA1vw9QDlp2cS/f9z8X/0y+W7Qy1oZ3U1iFAQUEco2v30V0SAlVLDG5rEfhjEfc3DKdGbFQ==} - dependencies: - atob-lite: 1.0.0 - glsl-tokenizer: 2.1.5 - dev: false - - /glsl-specular-beckmann/1.1.2: - resolution: {integrity: sha512-INvd7szO1twNPLGwE0Kf2xXIEy5wpOPl/LYoiw3+3nbAe6Rfn5rjdK9xvfnwoWksTCs3RejuLeAiZkLTkdFtwg==} - dev: false - - /glsl-specular-cook-torrance/2.0.1: - resolution: {integrity: sha512-bFtTfbgLXIbto/U6gM7h0IxoPMU+5zpMK5HoAaA2LnPuGk3JSzKAnsoyh5QGTT8ioIEQrjk6jcQNrgujPsP7rw==} - dependencies: - glsl-specular-beckmann: 1.1.2 - dev: false - /glsl-token-assignments/2.0.2: resolution: {integrity: sha512-OwXrxixCyHzzA0U2g4btSNAyB2Dx8XrztY5aVUCjRSh4/D0WoJn8Qdps7Xub3sz6zE73W3szLrmWtQ7QMpeHEQ==} dev: false @@ -8804,7 +8335,14 @@ packages: resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} engines: {node: '>= 0.4.0'} dependencies: - function-bind: 1.1.1 + function-bind: 1.1.1 + + /hash-base/3.0.4: + resolution: {integrity: sha512-EeeoJKjTyt868liAlVmcv2ZsUfGHlE3Q+BICOXcZiwN3osr5Q/zFGYmTJpoIzuaSTAwndFy+GqhEwlU4L3j4Ow==} + engines: {node: '>=4'} + dependencies: + inherits: 2.0.4 + safe-buffer: 5.2.1 /hash-base/3.1.0: resolution: {integrity: sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==} @@ -8990,7 +8528,7 @@ packages: engines: {node: '>=8.0.0'} dependencies: eventemitter3: 4.0.7 - follow-redirects: 1.15.2_debug@4.3.4 + follow-redirects: 1.15.6_debug@4.3.4 requires-port: 1.0.0 transitivePeerDependencies: - debug @@ -9062,12 +8600,6 @@ packages: dev: false optional: true - /image-size/0.7.5: - resolution: {integrity: sha512-Hiyv+mXHfFEP7LzUL/llg9RwFxxY+o9N3JVLIeG5E7iFIFAalxvRU9UZthBdYDEVnzHMgjnKJPPpay5BWf1g9g==} - engines: {node: '>=6.9.0'} - hasBin: true - dev: false - /immer/1.10.0: resolution: {integrity: sha512-O3sR1/opvCDGLEVcvrGTMtLac8GJ5IwZC4puPrLuRj3l7ICKvkmA0vGuU9OW8mV9WIBRnaxp5GJh9IEAaNOoYg==} dev: false @@ -9129,13 +8661,6 @@ packages: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} - /incremental-convex-hull/1.0.1: - resolution: {integrity: sha512-mKRJDXtzo1R9LxCuB1TdwZXHaPaIEldoGPsXy2jrJc/kufyqp8y/VAQQxThSxM2aroLoh6uObexPk1ASJ7FB7Q==} - dependencies: - robust-orientation: 1.2.1 - simplicial-complex: 1.0.0 - dev: false - /indent-string/3.2.0: resolution: {integrity: sha512-BYqTHXTGUIvg7t1r4sJNKcbDZkL92nkXA8YtRpbjFHRHGDL/NtUeiBJMeE60kIFN/Mg8ESaWQvftaYMGJzQZCQ==} engines: {node: '>=4'} @@ -9234,33 +8759,19 
@@ packages: engines: {node: '>= 0.10'} dev: true - /interval-tree-1d/1.0.4: - resolution: {integrity: sha512-wY8QJH+6wNI0uh4pDQzMvl+478Qh7Rl4qLmqiluxALlNvl+I+o5x38Pw3/z7mDPTPS1dQalZJXsmbvxx5gclhQ==} - dependencies: - binary-search-bounds: 2.0.5 - dev: false - /invariant/2.2.4: resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} dependencies: loose-envify: 1.4.0 dev: false - /invert-permutation/1.0.0: - resolution: {integrity: sha512-8f473/KSrnvyBd7Khr4PC5wPkAOehwkGc+AH5Q7D+U/fE+cdDob2FJ3naXAs4mspR9JIaEwbDI3me8H0KlVzSQ==} - dev: false - - /iota-array/1.0.0: - resolution: {integrity: sha512-pZ2xT+LOHckCatGQ3DcG/a+QuEqvoxqkiL7tvE8nn3uuu+f6i1TtpB5/FtWFbxUuVr5PZCx8KskuGatbJDXOWA==} - dev: false - /ip-regex/2.1.0: resolution: {integrity: sha512-58yWmlHpp7VYfcdTwMTvwMmqx/Elfxjd9RXTDyMsbL7lLWmhMylLEqiYVLKuLzOZqVgiWXD9MfR62Vv89VRxkw==} engines: {node: '>=4'} dev: false - /ip/1.1.8: - resolution: {integrity: sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==} + /ip/1.1.9: + resolution: {integrity: sha512-cyRxvOEpNHNtchU3Ln9KC/auJgup87llfQpQ+t5ghoC/UhL16SWzbueiCsdTnWmqAWl7LadfuwhlqmtOaqMHdQ==} dev: false /ipaddr.js/1.9.1: @@ -9543,6 +9054,10 @@ packages: resolution: {integrity: sha512-wW/SXnYJkTjs++tVK5b6kVITZpAZPtUrt9SF80vvxGiF/Oywal+COk1jlRkiVq15RFNEQKQY31TkV24/1T5cVg==} dev: false + /is-mobile/4.0.0: + resolution: {integrity: sha512-mlcHZA84t1qLSuWkt2v0I2l61PYdyQDt4aG1mLIXF5FDMm4+haBCxCPYSr/uwqQNRk1MiTizn0ypEuRAOLRAew==} + dev: false + /is-negated-glob/1.0.0: resolution: {integrity: sha512-czXVVn/QEmgvej1f50BZ648vUI+em0xqMq2Sn+QncCLN4zj1UAxlT+kw/6ggQTOaZPd1HqKQGEqbpQVtJucWug==} engines: {node: '>=0.10.0'} @@ -10449,7 +9964,7 @@ packages: connect-pause: 0.1.1 cors: 2.8.5 errorhandler: 1.5.1 - express: 4.18.2 + express: 4.19.2 express-urlrewrite: 1.4.0 json-parse-helpfulerror: 1.0.3 lodash: 4.17.21 @@ -10622,10 +10137,6 @@ packages: deprecated: use String.prototype.padStart() dev: false - /lerp/1.0.3: - resolution: {integrity: sha512-70Rh4rCkJDvwWiTsyZ1HmJGvnyfFah4m6iTux29XmasRiZPDBpT9Cfa4ai73+uLZxnlKruUS62jj2lb11wURiA==} - dev: false - /less-loader/5.0.0_less@3.13.1: resolution: {integrity: sha512-bquCU89mO/yWLaUq0Clk7qCsKhsF/TZpJUzETRvJa9KSVEL9SO3ovCvdEHISBhrC81OwC8QSVX7E0bzElZj9cg==} engines: {node: '>= 4.8.0'} @@ -10792,6 +10303,10 @@ packages: resolution: {integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==} dev: false + /lodash.merge/4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: false + /lodash.sortby/4.7.0: resolution: {integrity: sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==} dev: false @@ -10982,49 +10497,11 @@ packages: vt-pbf: 3.1.3 dev: false - /marching-simplex-table/1.0.0: - resolution: {integrity: sha512-PexXXVF4f5Bux3vGCNlRRBqF/GyTerNo77PbBz8g/MFFXv212b48IGVglj/VfaYBRY6vlFQffa9dFbCCN0+7LA==} - dependencies: - convex-hull: 1.0.3 - dev: false - - /mat4-decompose/1.0.4: - resolution: {integrity: sha512-M3x6GXrzRTt5Ok4/bcHFc869Pe8F3uWaSp3xkUpi+uaTRulPXIZ1GWD13Z3A8WK2bxTrcvX21mjp05gUy/Dwbw==} - dependencies: - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - dev: false - - /mat4-interpolate/1.0.4: - resolution: {integrity: sha512-+ulnoc6GUHq8eGZGbLyhQU61tx2oeNAFilV/xzCCzLV+F3nDk8jqERUqRmx8eNMMMvrdvoRSw0JXmnisfVPY9A==} - dependencies: - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - mat4-decompose: 1.0.4 - 
mat4-recompose: 1.0.4 - quat-slerp: 1.0.1 - dev: false - - /mat4-recompose/1.0.4: - resolution: {integrity: sha512-s1P2Yl4LQxq8dN0CgJE+mCO8y3IX/SmauSZ+H0zJsE1UKlgJ9loInfPC/OUxn2MzUW9bfBZf0Wcc2QKA3/e6FQ==} - dependencies: - gl-mat4: 1.2.0 - dev: false - /math-log2/1.0.1: resolution: {integrity: sha512-9W0yGtkaMAkf74XGYVy4Dqw3YUMnTNB2eeiw9aQbUl4A3KmuCEHTt2DgAB07ENzOYAjsYSAYufkAq0Zd+jU7zA==} engines: {node: '>=0.10.0'} dev: false - /matrix-camera-controller/2.1.4: - resolution: {integrity: sha512-zsPGPONclrKSImNpqqKDTcqFpWLAIwMXEJtCde4IFPOw1dA9udzFg4HOFytOTosOFanchrx7+Hqq6glLATIxBA==} - dependencies: - binary-search-bounds: 2.0.5 - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - mat4-interpolate: 1.0.4 - dev: false - /md5.js/1.3.5: resolution: {integrity: sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==} dependencies: @@ -11336,12 +10813,6 @@ packages: resolution: {integrity: sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==} dev: false - /monotone-convex-hull-2d/1.0.1: - resolution: {integrity: sha512-ixQ3qdXTVHvR7eAoOjKY8kGxl9YjOFtzi7qOjwmFFPfBqZHVOjUFOBy/Dk9dusamRSPJe9ggyfSypRbs0Bl8BA==} - dependencies: - robust-orientation: 1.2.1 - dev: false - /morgan/1.10.0: resolution: {integrity: sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==} engines: {node: '>= 0.8.0'} @@ -11433,6 +10904,7 @@ packages: /nan/2.17.0: resolution: {integrity: sha512-2ZTgtl0nJsO0KQCjEpxcIr5D+Yv90plTitZt9JBfQvVJDS5seMl3FOvsh3+9CoYWXf/1l5OaZzzF6nDm4cagaQ==} + requiresBuild: true dev: false optional: true @@ -11477,6 +10949,10 @@ packages: - supports-color dev: false + /native-promise-only/0.8.1: + resolution: {integrity: sha512-zkVhZUA3y8mbz652WrL5x0fB0ehrBkulWT3TomAQ9iDtyXZvzKeEA6GPxAItBYeNYl5yngKRX612qHOhvMkDeg==} + dev: false + /native-request/1.1.0: resolution: {integrity: sha512-uZ5rQaeRn15XmpgE0xoPL8YWqcX90VtCFglYwAgkvKM5e8fog+vePLAhHxuuv/gRkrQxIeh5U3q9sMNUrENqWw==} requiresBuild: true @@ -11486,55 +10962,16 @@ packages: /natural-compare/1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - /ndarray-extract-contour/1.0.1: - resolution: {integrity: sha512-iDngNoFRqrqbXGLP8BzyGrybw/Jnkkn7jphzc3ZFfO7dfmpL1Ph74/6xCi3xSvJFyVW90XpMnd766jTaRPsTCg==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /ndarray-gradient/1.0.1: - resolution: {integrity: sha512-+xONVi7xxTCGL6KOb11Yyoe0tPNqAUKF39CvFoRjL5pdOmPd2G2pckK9lD5bpLF3q45LLnYNyiUSJSdNmQ2MTg==} - dependencies: - cwise-compiler: 1.1.3 - dup: 1.0.0 - dev: false - - /ndarray-linear-interpolate/1.0.0: - resolution: {integrity: sha512-UN0f4+6XWsQzJ2pP5gVp+kKn5tJed6mA3K/L50uO619+7LKrjcSNdcerhpqxYaSkbxNJuEN76N05yBBJySnZDw==} - dev: false - - /ndarray-ops/1.2.2: - resolution: {integrity: sha512-BppWAFRjMYF7N/r6Ie51q6D4fs0iiGmeXIACKY66fLpnwIui3Wc3CXiD/30mgLbDjPpSLrsqcp3Z62+IcHZsDw==} - dependencies: - cwise-compiler: 1.1.3 - dev: false - - /ndarray-pack/1.2.1: - resolution: {integrity: sha512-51cECUJMT0rUZNQa09EoKsnFeDL4x2dHRT0VR5U2H5ZgEcm95ZDWcMA5JShroXjHOejmAD/fg8+H+OvUnVXz2g==} - dependencies: - cwise-compiler: 1.1.3 - ndarray: 1.0.19 - dev: false - - /ndarray-scratch/1.2.0: - resolution: {integrity: sha512-a4pASwB1jQyJcKLYrwrladVfDZDUGc78qLJZbHyb1Q4rhte0URhzc6ALQpBcauwgov0sXLwZz3vYH5jKAhSMIg==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - - /ndarray-sort/1.0.1: - resolution: {integrity: 
sha512-Gpyis5NvEPOQVadDOG+Dx8bhYCkaxn5IlA4Ig/jBJIlnW1caDiPneQLzT/+AIMeHEmqlGZfdqO/I1TXJS2neAw==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /ndarray/1.0.19: - resolution: {integrity: sha512-B4JHA4vdyZU30ELBw3g7/p9bZupyew5a7tX1Y/gGeF2hafrPaQZhgrGQfsvgfYbgdFZjYwuEcnaobeM/WMW+HQ==} + /needle/2.9.1: + resolution: {integrity: sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ==} + engines: {node: '>= 4.4.x'} + hasBin: true dependencies: - iota-array: 1.0.0 - is-buffer: 1.1.6 + debug: 3.2.7 + iconv-lite: 0.4.24 + sax: 1.2.4 + transitivePeerDependencies: + - supports-color dev: false /negotiator/0.6.3: @@ -11549,12 +10986,6 @@ packages: resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==} dev: false - /nextafter/1.0.0: - resolution: {integrity: sha512-7PO+A89Tll2rSEfyrjtqO0MaI37+nnxBdnQcPypfbEYYuGaJxWGCqaOwQX4a3GHNTS08l1kazuiLEWZniZjMUQ==} - dependencies: - double-bits: 1.1.1 - dev: false - /nice-try/1.0.5: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} @@ -11681,10 +11112,6 @@ packages: engines: {node: '>=8'} dev: true - /normals/1.1.0: - resolution: {integrity: sha512-XWeliW48BLvbVJ+cjQAOE+tA0m1M7Yi1iTPphAS9tBmW1A/c/cOVnEUecPCCMH5lEAihAcG6IRle56ls9k3xug==} - dev: false - /npm-run-all/4.1.5: resolution: {integrity: sha512-Oo82gJDAVcaMdi3nuoKFavkIHBRVqQ1qvMb+9LHk/cF4P6B2m8aP04hGf7oL6wZ9BuGwX1onlLhpuoofSyoQDQ==} engines: {node: '>= 4'} @@ -11730,10 +11157,6 @@ packages: is-finite: 1.1.0 dev: false - /numeric/1.2.6: - resolution: {integrity: sha512-avBiDAP8siMa7AfJgYyuxw1oyII4z2sswS23+O+ZfV28KrtNzy0wxUFwi4f3RyM4eeeXNs1CThxR7pb5QQcMiw==} - dev: false - /nwsapi/2.2.7: resolution: {integrity: sha512-ub5E4+FBPKwAZx0UwIQOjYWGHTEq5sPqHQNRN8Z9e4A7u3Tj1weLJsL59yH9vmvqEtBHaOmT6cYQKIZOxp35FQ==} dev: false @@ -11946,13 +11369,6 @@ packages: type-check: 0.3.2 word-wrap: 1.2.5 - /orbit-camera-controller/4.0.0: - resolution: {integrity: sha512-/XTmpr6FUT6MuKPBGN2nv9cS8jhhVs8do71VagBQS5p4rxM04MhqSnI/Uu+gVNN5s6KPcS73o1dHzjuDThEJUA==} - dependencies: - filtered-vector: 1.2.5 - gl-mat4: 1.2.0 - dev: false - /os-browserify/0.3.0: resolution: {integrity: sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==} @@ -12053,13 +11469,6 @@ packages: semver: 6.3.1 dev: true - /pad-left/1.0.2: - resolution: {integrity: sha512-saxSV1EYAytuZDtQYEwi0DPzooG6aN18xyHrnJtzwjVwmMauzkEecd7hynVJGolNGk1Pl9tltmZqfze4TZTCxg==} - engines: {node: '>=0.10.0'} - dependencies: - repeat-string: 1.6.1 - dev: false - /pako/1.0.11: resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==} @@ -12097,6 +11506,17 @@ packages: pbkdf2: 3.1.2 safe-buffer: 5.2.1 + /parse-asn1/5.1.7: + resolution: {integrity: sha512-CTM5kuWR3sx9IFamcl5ErfPl6ea/N8IYwiJ+vpeB2g+1iknv7zBl5uPwbMbRVznRVbrNY6lGuDoE5b30grmbqg==} + engines: {node: '>= 0.10'} + dependencies: + asn1.js: 4.10.1 + browserify-aes: 1.2.0 + evp_bytestokey: 1.0.3 + hash-base: 3.0.4 + pbkdf2: 3.1.2 + safe-buffer: 5.2.1 + /parse-json/2.2.0: resolution: {integrity: sha512-QR/GGaKCkhwk1ePQNYDRKYZ3mwU9ypsKhB0XyFnLQdomyEqk3e8wpW3V5Jp88zbxK4n5ST1nqo+g9juTpownhQ==} engines: {node: '>=0.10.0'} @@ -12248,19 +11668,6 @@ packages: /performance-now/2.1.0: resolution: {integrity: sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==} - /permutation-parity/1.0.0: - resolution: 
{integrity: sha512-mRaEvnnWolbZuErWD08StRUZP9YOWG3cURP5nYpRg1D2PENzPXCUrPv8/bOk0tfln0hISLZjOdOcQCbsVpL2nQ==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /permutation-rank/1.0.0: - resolution: {integrity: sha512-kmXwlQcd4JlV8g61jz0xDyroFNlJ/mP+KbSBllMuQD7FvaQInRnnAStElcppkUXd8qVFLvemy6msUmBn7sDzHg==} - dependencies: - invert-permutation: 1.0.0 - typedarray-pool: 1.2.0 - dev: false - /pick-by-alias/1.2.0: resolution: {integrity: sha512-ESj2+eBxhGrcA1azgHs7lARG5+5iLakc/6nlfbpjcLl00HuuUOIuORhYXN4D1HfvMSKuVtFQjAlnwi1JHEeDIw==} dev: false @@ -12340,94 +11747,56 @@ packages: find-up: 3.0.0 dev: false - /planar-dual/1.0.2: - resolution: {integrity: sha512-jfQCbX1kXu53+enC+BPQlfoZI1u5m8IUhFVtFG+9tUj84wnuaYNheR69avYWCNXWnUCkwUajmYMqX9M2Ruh4ug==} - dependencies: - compare-angle: 1.0.1 - dup: 1.0.0 - dev: false - - /planar-graph-to-polyline/1.0.6: - resolution: {integrity: sha512-h8a9kdAjo7mRhC0X6HZ42xzFp7vKDZA+Hygyhsq/08Qi4vVAQYJaLLYLvKUUzRbVKvdYqq0reXHyV0EygyEBHA==} - dependencies: - edges-to-adjacency-list: 1.0.0 - planar-dual: 1.0.2 - point-in-big-polygon: 2.0.1 - robust-orientation: 1.2.1 - robust-sum: 1.0.0 - two-product: 1.0.2 - uniq: 1.0.1 - dev: false - /please-upgrade-node/3.2.0: resolution: {integrity: sha512-gQR3WpIgNIKwBMVLkpMUeR3e1/E1y42bqDQZfql+kDeXd8COYfM8PQA4X6y7a8u9Ua9FHmsrrmirW2vHs45hWg==} dependencies: semver-compare: 1.0.0 dev: true - /plotly.js/1.58.5: - resolution: {integrity: sha512-ChTlnFXB4tB0CzcG1mqgUKYnrJsZ8REDGox8BHAa/ltsd48MOAhOmFgjyDxwsXyjjgwOI296GeYDft8g4ftLHQ==} + /plotly.js/2.25.2: + resolution: {integrity: sha512-Pf6dPYGl21W7A3FTgLQ52fpgvrqGhCPDT3+612bxwg4QXlvxhnoFwvuhT1BRW/l2nbYGpRoUH79K54yf2vCMVQ==} dependencies: + '@plotly/d3': 3.8.1 '@plotly/d3-sankey': 0.7.2 '@plotly/d3-sankey-circular': 0.33.1 - '@plotly/point-cluster': 3.1.9 '@turf/area': 6.5.0 '@turf/bbox': 6.5.0 '@turf/centroid': 6.5.0 - alpha-shape: 1.0.0 canvas-fit: 1.5.0 color-alpha: 1.0.4 color-normalize: 1.5.0 color-parse: 1.3.8 color-rgba: 2.1.1 - convex-hull: 1.0.3 country-regex: 1.1.0 - d3: 3.5.17 d3-force: 1.2.1 + d3-format: 1.4.5 + d3-geo: 1.12.1 + d3-geo-projection: 2.9.0 d3-hierarchy: 1.1.9 - d3-interpolate: 1.4.0 + d3-interpolate: 3.0.1 + d3-time: 1.1.0 d3-time-format: 2.3.0 - delaunay-triangulate: 1.1.6 - es6-promise: 4.2.8 fast-isnumeric: 1.1.4 - gl-cone3d: 1.5.2 - gl-contour2d: 1.1.7 - gl-error3d: 1.0.16 - gl-heatmap2d: 1.1.1 - gl-line3d: 1.2.1 gl-mat4: 1.2.0 - gl-mesh3d: 2.3.1 - gl-plot2d: 1.4.5 - gl-plot3d: 2.4.7 - gl-pointcloud2d: 1.0.3 - gl-scatter3d: 1.2.3 - gl-select-box: 1.0.4 - gl-spikes2d: 1.0.2 - gl-streamtube3d: 1.4.1 - gl-surface3d: 1.6.0 gl-text: 1.3.1 glslify: 7.1.1 has-hover: 1.0.1 has-passive-events: 1.0.0 - image-size: 0.7.5 - is-mobile: 2.2.2 + is-mobile: 4.0.0 mapbox-gl: 1.10.1 - matrix-camera-controller: 2.1.4 mouse-change: 1.4.0 mouse-event-offset: 3.0.2 mouse-wheel: 1.2.0 - ndarray: 1.0.19 - ndarray-linear-interpolate: 1.0.0 + native-promise-only: 0.8.1 parse-svg-path: 0.1.2 + point-in-polygon: 1.1.0 polybooljs: 1.2.0 - regl: 1.7.0 + probe-image-size: 7.2.3 + regl: /@plotly/regl/2.1.2 regl-error2d: 2.0.12 regl-line2d: 3.1.2 regl-scatter2d: 3.2.9 regl-splom: 1.0.14 - right-now: 1.0.0 - robust-orientation: 1.2.1 - sane-topojson: 4.0.0 strongly-connected-components: 1.0.1 superscript-text: 1.0.0 svg-path-sdf: 1.1.3 @@ -12436,6 +11805,8 @@ packages: topojson-client: 3.1.0 webgl-context: 2.2.0 world-calendars: 1.0.3 + transitivePeerDependencies: + - supports-color dev: false /plur/3.1.1: @@ -12463,25 +11834,14 @@ packages: - typescript dev: false - 
/point-in-big-polygon/2.0.1: - resolution: {integrity: sha512-DtrN8pa2VfMlvmWlCcypTFeBE4+OYz1ojDNJLKCWa4doiVAD6PRBbxFYAT71tsp5oKaRXT5sxEiHCAQKb1zr2Q==} - dependencies: - binary-search-bounds: 2.0.5 - interval-tree-1d: 1.0.4 - robust-orientation: 1.2.1 - slab-decomposition: 1.0.3 + /point-in-polygon/1.1.0: + resolution: {integrity: sha512-3ojrFwjnnw8Q9242TzgXuTD+eKiutbzyslcq1ydfu82Db2y+Ogbmyrkpv0Hgj31qwT3lbS9+QAAO/pIQM35XRw==} dev: false /polybooljs/1.2.0: resolution: {integrity: sha512-mKjR5nolISvF+q2BtC1fi/llpxBPTQ3wLWN8+ldzdw2Hocpc8C72ZqnamCM4Z6z+68GVVjkeM01WJegQmZ8MEQ==} dev: false - /polytope-closest-point/1.0.0: - resolution: {integrity: sha512-rvmt1e2ci9AUyWeHg+jsNuhGC4eBtxX4WjD9uDdvQzv2I1CVJSgbblJTslNXpGUu4KZSsUtSzvIdHKRKfRF3kw==} - dependencies: - numeric: 1.2.6 - dev: false - /portfinder/1.0.32_supports-color@6.1.0: resolution: {integrity: sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==} engines: {node: '>= 0.12.0'} @@ -13214,6 +12574,16 @@ packages: parse-ms: 2.1.0 dev: false + /probe-image-size/7.2.3: + resolution: {integrity: sha512-HubhG4Rb2UH8YtV4ba0Vp5bQ7L78RTONYu/ujmCu5nBI8wGv24s4E9xSKBi0N1MowRpxk76pFCpJtW0KPzOK0w==} + dependencies: + lodash.merge: 4.6.2 + needle: 2.9.1 + stream-parser: 0.3.1 + transitivePeerDependencies: + - supports-color + dev: false + /process-nextick-args/2.0.1: resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} @@ -13288,6 +12658,10 @@ packages: forwarded: 0.2.0 ipaddr.js: 1.9.1 + /proxy-from-env/1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + dev: false + /prr/1.0.1: resolution: {integrity: sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==} dev: false @@ -13365,12 +12739,6 @@ packages: resolution: {integrity: sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==} engines: {node: '>=0.6'} - /quat-slerp/1.0.1: - resolution: {integrity: sha512-OTozCDeP5sW7cloGR+aIycctZasBhblk1xdsSGP1Iz5pEwDqyChloTmc96xsDfusFD7GRxwDDu+tpJX0Wa1kJw==} - dependencies: - gl-quat: 1.0.0 - dev: false - /query-string/4.3.4: resolution: {integrity: sha512-O2XLNDBIg1DnTOa+2XrIwSiXEV8h2KImXUnjhhn2+UsvZ+Es2uyd5CCRTNQlDGbzUQOW3aYCBx9rVA6dzsiY7Q==} engines: {node: '>=0.10.0'} @@ -13417,21 +12785,6 @@ packages: resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} engines: {node: '>= 0.6'} - /rat-vec/1.1.1: - resolution: {integrity: sha512-FbxGwkQxmw4Jx41LR7yMOR+g8M9TWCEmf/SUBQVLuK2eh0nThnffF7IUualr3XE2x5F8AdLiCVeSGwXd4snfgg==} - dependencies: - big-rat: 1.0.4 - dev: false - - /raw-body/2.5.1: - resolution: {integrity: sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==} - engines: {node: '>= 0.8'} - dependencies: - bytes: 3.1.2 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - unpipe: 1.0.0 - /raw-body/2.5.2: resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} engines: {node: '>= 0.8'} @@ -13440,7 +12793,6 @@ packages: http-errors: 2.0.0 iconv-lite: 0.4.24 unpipe: 1.0.0 - dev: true /rc-align/2.4.5: resolution: {integrity: sha512-nv9wYUYdfyfK+qskThf4BQUSIadeI/dCsfaMZfNEoxm9HwOIioQ+LyqmMK6jWHAZQgOzMLaqawhuBXlF63vgjw==} @@ -14069,13 +13421,13 @@ packages: resolution: {integrity: 
sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==} dev: false - /react-plotly.js/2.6.0_f6dluzp62qf57yw3gl4ocsg3e4: + /react-plotly.js/2.6.0_qtjenpcawcnnxnr626ndcvhi4u: resolution: {integrity: sha512-g93xcyhAVCSt9kV1svqG1clAEdL6k3U+jjuSzfTV7owaSU9Go6Ph8bl25J+jKfKvIGAEYpe4qj++WHJuc9IaeA==} peerDependencies: plotly.js: '>1.34.0' react: '>0.13.0' dependencies: - plotly.js: 1.58.5 + plotly.js: 2.25.2 prop-types: 15.8.1 react: 16.14.0 dev: false @@ -14391,14 +13743,6 @@ packages: strip-indent: 2.0.0 dev: true - /reduce-simplicial-complex/1.0.0: - resolution: {integrity: sha512-t+nT7sHDtcxBx8TbglqfLsLKoFiSn9hp6GFojJEThHBAFv72wQeq/uRiPYZa4Xb8FR1Ye1foRcBV3Ki6bgm+pQ==} - dependencies: - cell-orientation: 1.0.1 - compare-cell: 1.0.0 - compare-oriented-cell: 1.0.1 - dev: false - /reflect.getprototypeof/1.0.3: resolution: {integrity: sha512-TTAOZpkJ2YLxl7mVHWrNo3iDMEkYlva/kgFcXndqMgbo/AZUmmavEkdXV+hXtE4P8xdyEKRzalaFqZVuwIk/Nw==} engines: {node: '>= 0.4'} @@ -14567,10 +13911,6 @@ packages: regl-scatter2d: 3.2.9 dev: false - /regl/1.7.0: - resolution: {integrity: sha512-bEAtp/qrtKucxXSJkD4ebopFZYP0q1+3Vb2WECWv/T8yQEgKxDxJ7ztO285tAMaYZVR6mM1GgI6CCn8FROtL1w==} - dev: false - /regl/2.1.0: resolution: {integrity: sha512-oWUce/aVoEvW5l2V0LK7O5KJMzUSKeiOwFuJehzpSFd43dO5spP9r+sSUfhKtsky4u6MCqWJaRL+abzExynfTg==} dev: false @@ -14830,78 +14170,6 @@ packages: classnames: 2.3.2 dev: false - /robust-compress/1.0.0: - resolution: {integrity: sha512-E8btSpQ6zZr7LvRLrLvb+N5rwQ0etUbsXFKv5NQj6TVK6RYT00Qg9iVFvIWR+GxXUvpes7FDN0WfXa3l7wtGOw==} - dev: false - - /robust-determinant/1.1.0: - resolution: {integrity: sha512-xva9bx/vyAv3pVYL2++vlnvM9q7oQOeCS5iscmlWtmaXHEgI4GFWeuYPUVVhvmYwx9N49EsQTonVJihYtcMo1Q==} - dependencies: - robust-compress: 1.0.0 - robust-scale: 1.0.2 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-dot-product/1.0.0: - resolution: {integrity: sha512-Nu/wah8B8RotyZLRPdlEL0ZDh3b7wSwUBLdbTHwS/yw0qqjMJ943PSCkd6EsF5R5QFDWF2x77DGsbmnv9/7/ew==} - dependencies: - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-in-sphere/1.2.1: - resolution: {integrity: sha512-3zJdcMIOP1gdwux93MKTS0RiMYEGwQBoE5R1IW/9ZQmGeZzP7f7i4+xdcK8ujJvF/dEOS1WPuI9IB1WNFbj3Cg==} - dependencies: - robust-scale: 1.0.2 - robust-subtract: 1.0.0 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-linear-solve/1.0.0: - resolution: {integrity: sha512-I1qW8Bl9+UYeGNh2Vt8cwkcD74xWMyjnU6lSVcZrf0eyfwPmreflY3v0SvqCZOj5ddxnSS1Xp31igbFNcg1TGQ==} - dependencies: - robust-determinant: 1.1.0 - dev: false - - /robust-orientation/1.2.1: - resolution: {integrity: sha512-FuTptgKwY6iNuU15nrIJDLjXzCChWB+T4AvksRtwPS/WZ3HuP1CElCm1t+OBfgQKfWbtZIawip+61k7+buRKAg==} - dependencies: - robust-scale: 1.0.2 - robust-subtract: 1.0.0 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-product/1.0.0: - resolution: {integrity: sha512-7ww6m+ICW6Dt7ylHVy1aeeNwTfMXfh2BHqHVNE+CHvrU9sI97Vb6uHnid0MN3I9afTI5DXOB7q4SQa2fxuo2Gw==} - dependencies: - robust-scale: 1.0.2 - robust-sum: 1.0.0 - dev: false - - /robust-scale/1.0.2: - resolution: {integrity: sha512-jBR91a/vomMAzazwpsPTPeuTPPmWBacwA+WYGNKcRGSh6xweuQ2ZbjRZ4v792/bZOhRKXRiQH0F48AvuajY0tQ==} - dependencies: - two-product: 1.0.2 - two-sum: 1.0.0 - dev: false - - /robust-segment-intersect/1.0.1: - resolution: {integrity: sha512-QWngxcL7rCRLK7nTMcTNBPi/q+fecrOo6aOtTPnXjT/Dve5AK20DzUSq2fznUS+rCAxyir6OdPgDCzcUxFtJoQ==} - dependencies: - robust-orientation: 1.2.1 - dev: false - - /robust-subtract/1.0.0: - resolution: {integrity: 
sha512-xhKUno+Rl+trmxAIVwjQMiVdpF5llxytozXJOdoT4eTIqmqsndQqFb1A0oiW3sZGlhMRhOi6pAD4MF1YYW6o/A==} - dev: false - - /robust-sum/1.0.0: - resolution: {integrity: sha512-AvLExwpaqUqD1uwLU6MwzzfRdaI6VEZsyvQ3IAQ0ZJ08v1H+DTyqskrf2ZJyh0BDduFVLN7H04Zmc+qTiahhAw==} - dev: false - /rsvp/4.8.5: resolution: {integrity: sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA==} engines: {node: 6.* || >= 7.*} @@ -14963,10 +14231,6 @@ packages: /safer-buffer/2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} - /sane-topojson/4.0.0: - resolution: {integrity: sha512-bJILrpBboQfabG3BNnHI2hZl52pbt80BE09u4WhnrmzuF2JbMKZdl62G5glXskJ46p+gxE2IzOwGj/awR4g8AA==} - dev: false - /sane/4.1.0: resolution: {integrity: sha512-hhbzAgTIX8O7SHfp2c8/kREfEn4qO/9q8C9beyY6+tvZ87EpoZ3i1RIEvp27YBswnNbY9mWd6paKVmKbAgLfZA==} engines: {node: 6.* || 8.* || >= 10.*} @@ -15291,10 +14555,6 @@ packages: /signal-exit/3.0.7: resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} - /signum/0.0.0: - resolution: {integrity: sha512-nct2ZUmwemVxeuPY5h+JLpHGJvLCXXNahGVI7IB3a6Fy5baX9AGSb854HceYH4FBw4eGjoZfEo9YRfkGfKdZQA==} - dev: false - /signum/1.0.0: resolution: {integrity: sha512-yodFGwcyt59XRh7w5W3jPcIQb3Bwi21suEfT7MAWnBX3iCdklJpgDgvGT9o04UonglZN5SNMfJFkHIR/jO8GHw==} dev: false @@ -15305,55 +14565,10 @@ packages: is-arrayish: 0.3.2 dev: false - /simplicial-complex-boundary/1.0.1: - resolution: {integrity: sha512-hz/AaVbs+s08EVoxlbCE68AlC6/mxFJLxJrGRMbDoTjz3030nhcOq+w5+f0/ZaU2EYjmwa8CdVKpiRVIrhaZjA==} - dependencies: - boundary-cells: 2.0.2 - reduce-simplicial-complex: 1.0.0 - dev: false - - /simplicial-complex-contour/1.0.2: - resolution: {integrity: sha512-Janyqvpa7jgr9MJbwR/XGyYz7bdhXNq7zgHxD0G54LCRNyn4bf3Hely2iWQeK/IGu3c5BaWFUh7ElxqXhKrq0g==} - dependencies: - marching-simplex-table: 1.0.0 - ndarray: 1.0.19 - ndarray-sort: 1.0.1 - typedarray-pool: 1.2.0 - dev: false - - /simplicial-complex/0.3.3: - resolution: {integrity: sha512-JFSxp7I5yORuKSuwGN96thhkqZVvYB4pkTMkk+PKP2QsOYYU1e84OBoHwOpFyFmjyvB9B3UDZKzHQI5S/CPUPA==} - dependencies: - bit-twiddle: 0.0.2 - union-find: 0.0.4 - dev: false - - /simplicial-complex/1.0.0: - resolution: {integrity: sha512-mHauIKSOy3GquM5VnYEiu7eP5y4A8BiaN9ezUUgyYFz1k68PqDYcyaH3kenp2cyvWZE96QKE3nrxYw65Allqiw==} - dependencies: - bit-twiddle: 1.0.2 - union-find: 1.0.2 - dev: false - - /simplify-planar-graph/2.0.1: - resolution: {integrity: sha512-KdC2ZPFvrGl9+lH/P3Yik7G0si2Zpk6Xiqjq8l9U1lOox5a/9dGLjevi9tvqoh4V7yQbs7fs6+rNCOAdrzUktw==} - dependencies: - robust-orientation: 1.2.1 - simplicial-complex: 0.3.3 - dev: false - /sisteransi/1.0.5: resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} dev: false - /slab-decomposition/1.0.3: - resolution: {integrity: sha512-1EfR304JHvX9vYQkUi4AKqN62mLsjk6W45xTk/TxwN8zd3HGwS7PVj9zj0I6fgCZqfGlimDEY+RzzASHn97ZmQ==} - dependencies: - binary-search-bounds: 2.0.5 - functional-red-black-tree: 1.0.1 - robust-orientation: 1.2.1 - dev: false - /slash/1.0.0: resolution: {integrity: sha512-3TYDR7xWt4dIqV2JauJr+EJeW356RXijHeUlO+8djJ+uBXPn8/2dpzBc8yQhh583sVvc9CvFAeQVgijsH+PNNg==} engines: {node: '>=0.10.0'} @@ -15526,13 +14741,6 @@ packages: - supports-color dev: false - /split-polygon/1.0.0: - resolution: {integrity: sha512-nBFcgQUVEE8dcOjuKaRdlM53k8RxUYpRxZ//n0pHJQGhbVscrsti+gllJI3pK3y7fgFwGWgt7NFhAX5sz0UoWQ==} - dependencies: - 
robust-dot-product: 1.0.0 - robust-sum: 1.0.0 - dev: false - /split-string/3.1.0: resolution: {integrity: sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==} engines: {node: '>=0.10.0'} @@ -15542,10 +14750,6 @@ packages: /sprintf-js/1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - /sprintf-js/1.1.2: - resolution: {integrity: sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==} - dev: false - /sshpk/1.17.0: resolution: {integrity: sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==} engines: {node: '>=0.10.0'} @@ -15646,6 +14850,14 @@ packages: to-arraybuffer: 1.0.1 xtend: 4.0.2 + /stream-parser/0.3.1: + resolution: {integrity: sha512-bJ/HgKq41nlKvlhccD5kaCr/P+Hu0wPNKPJOH7en+YrJu/9EgqUF+88w5Jb6KNcjOFMhfX4B2asfeAtIGuHObQ==} + dependencies: + debug: 2.6.9 + transitivePeerDependencies: + - supports-color + dev: false + /stream-shift/1.0.1: resolution: {integrity: sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==} dev: false @@ -15900,14 +15112,6 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} - /surface-nets/1.0.2: - resolution: {integrity: sha512-Se+BaCb5yc8AV1IfT6TwTWEe/KuzzjzcMQQCbcIahzk9xRO5bIxxGM2MmKxE9nmq8+RD8DLBLXu0BjXoRs21iw==} - dependencies: - ndarray-extract-contour: 1.0.1 - triangulate-hypercube: 1.0.1 - zero-crossings: 1.0.1 - dev: false - /svg-arc-to-cubic-bezier/3.2.0: resolution: {integrity: sha512-djbJ/vZKZO+gPoSDThGNpKDO+o+bAeA4XQKovvkNCqnIS2t+S4qnLAGQhyyrulhCFRl1WWzAp0wUDV8PpTVU3g==} dev: false @@ -16050,12 +15254,6 @@ packages: require-main-filename: 2.0.0 dev: false - /text-cache/4.2.2: - resolution: {integrity: sha512-zky+UDYiX0a/aPw/YTBD+EzKMlCTu1chFuCMZeAkgoRiceySdROu1V2kJXhCbtEdBhiOviYnAdGiSYl58HW0ZQ==} - dependencies: - vectorize-text: 3.2.2 - dev: false - /text-table/0.2.0: resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} @@ -16207,20 +15405,6 @@ packages: punycode: 2.3.0 dev: false - /triangulate-hypercube/1.0.1: - resolution: {integrity: sha512-SAIacSBfUNfgeCna8q2i+1taOtFJkYuOqpduaJ1KUeOJpqc0lLKMYzPnZb4CA6KCOiD8Pd4YbuVq41wa9dvWyw==} - dependencies: - gamma: 0.1.0 - permutation-parity: 1.0.0 - permutation-rank: 1.0.0 - dev: false - - /triangulate-polyline/1.0.3: - resolution: {integrity: sha512-crJcVFtVPFYQ8r9iIhe9JqkauDvNWDSZLot8ly3DniSCO+zyUfKbtfD3fEoBaA5uMrQU/zBi11NBuVQeSToToQ==} - dependencies: - cdt2d: 1.0.0 - dev: false - /trim-newlines/2.0.0: resolution: {integrity: sha512-MTBWv3jhVjTU7XR3IQHllbiJs8sc75a80OEhB6or/q7pLTWgQ0bMGQXXYQSrSuXe6WiKWDZ5txXY5P59a/coVA==} engines: {node: '>=4'} @@ -16271,25 +15455,9 @@ packages: dependencies: safe-buffer: 5.2.1 - /turntable-camera-controller/3.0.1: - resolution: {integrity: sha512-UOGu9W/Mx053pAaczi0BEPqvWJOqSgtpdigWG9C8dX8rQVdyl2hWmpdJW3m15QrGxJtJHIhhDTHVtTZzPkd/FA==} - dependencies: - filtered-vector: 1.2.5 - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - dev: false - /tweetnacl/0.14.5: resolution: {integrity: sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==} - /two-product/1.0.2: - resolution: {integrity: sha512-vOyrqmeYvzjToVM08iU52OFocWT6eB/I5LUWYnxeAPGXAhAxXYU/Yr/R2uY5/5n4bvJQL9AQulIuxpIsMoT8XQ==} - dev: false - - /two-sum/1.0.0: - resolution: 
{integrity: sha512-phP48e8AawgsNUjEY2WvoIWqdie8PoiDZGxTDv70LDr01uX5wLEQbOgSP7Z/B6+SW5oLtbe8qaYX2fKJs3CGTw==} - dev: false - /type-check/0.3.2: resolution: {integrity: sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==} engines: {node: '>= 0.8.0'} @@ -16430,14 +15598,6 @@ packages: engines: {node: '>=4'} dev: false - /union-find/0.0.4: - resolution: {integrity: sha512-207oken6EyGDCBK5l/LTPsWfgy8N8s6idwRK2TG0ssWhzPlxEDdBA8nIV+eLbkEMdA8pAwE8F7/xwv2sCESVjQ==} - dev: false - - /union-find/1.0.2: - resolution: {integrity: sha512-wFA9bMD/40k7ZcpKVXfu6X1qD3ri5ryO8HUsuA1RnxPCQl66Mu6DgkxyR+XNnd+osD0aLENixcJVFj+uf+O4gw==} - dev: false - /union-value/1.0.1: resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} engines: {node: '>=0.10.0'} @@ -16684,18 +15844,6 @@ packages: resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} engines: {node: '>= 0.8'} - /vectorize-text/3.2.2: - resolution: {integrity: sha512-34NVOCpMMQVXujU4vb/c6u98h6djI0jGdtC202H4Huvzn48B6ARsR7cmGh1xsAc0pHNQiUKGK/aHF05VtGv+eA==} - dependencies: - cdt2d: 1.0.0 - clean-pslg: 1.1.2 - ndarray: 1.0.19 - planar-graph-to-polyline: 1.0.6 - simplify-planar-graph: 2.0.1 - surface-nets: 1.0.2 - triangulate-polyline: 1.0.3 - dev: false - /vendors/1.0.4: resolution: {integrity: sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==} dev: false @@ -16778,10 +15926,6 @@ packages: resolution: {integrity: sha512-lNR9aAefbGPpHO7AEnY0hCFjz1eTkWCXYvkTRrTHs9qv8zJp+SkVYpzfLIFXQQiG3tVvbNFQgVg2bQS8YGgxyw==} dev: false - /weakmap-shim/1.1.1: - resolution: {integrity: sha512-/wNyG+1FpiHhnfQo+TuA/XAUpvOOkKVl0A4qpT+oGcj5SlZCLmM+M1Py/3Sj8sy+YrEauCVITOxCsZKo6sPbQg==} - dev: false - /webgl-context/2.2.0: resolution: {integrity: sha512-q/fGIivtqTT7PEoF07axFIlHNk/XCPaYpq64btnepopSWvKNFkoORlQYgqDigBIuGA1ExnFd/GnSUnBNEPQY7Q==} dependencies: @@ -16824,12 +15968,12 @@ packages: connect-history-api-fallback: 1.6.0 debug: 4.3.4_supports-color@6.1.0 del: 4.1.1 - express: 4.18.2_supports-color@6.1.0 + express: 4.19.2_supports-color@6.1.0 html-entities: 1.4.0 http-proxy-middleware: 0.19.1_tmpgdztspuwvsxzgjkhoqk7duq import-local: 2.0.0 internal-ip: 4.3.0 - ip: 1.1.8 + ip: 1.1.9 is-absolute-url: 3.0.3 killable: 1.0.1 loglevel: 1.8.1 @@ -17427,9 +16571,3 @@ packages: y18n: 4.0.3 yargs-parser: 15.0.3 dev: true - - /zero-crossings/1.0.1: - resolution: {integrity: sha512-iNIldMZaDtAyIJMJ8NnGVHeejH//y4eVmpXriM+q/B/BPNz+2E7oAgSnw9MXqCd3RbQ8W+hor7T2jEyRoc/s2A==} - dependencies: - cwise-compiler: 1.1.3 - dev: false diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index 19f306ae4f6b..c42bd8c1f91b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -17,7 +17,7 @@ */ import React from 'react'; -import {Table, Icon, Tooltip} from 'antd'; +import {Table, Icon, Tooltip, Popover} from 'antd'; import {PaginationConfig} from 'antd/lib/pagination'; import moment from 'moment'; import {ReplicationIcon} from 'utils/themeIcons'; @@ -192,21 +192,36 @@ const COLUMNS = [ key: 'pipelines', isVisible: true, render: (pipelines: IPipeline[], record: 
IDatanode) => { + let firstThreePipelinesIDs = []; + let remainingPipelinesIDs: any[] = []; + firstThreePipelinesIDs = pipelines && pipelines.filter((element, index) => index < 3); + remainingPipelinesIDs = pipelines && pipelines.slice(3, pipelines.length); + + const RenderPipelineIds = ({ pipelinesIds }) => { + return pipelinesIds && pipelinesIds.map((pipeline: any, index: any) => ( +

    + + {pipeline.pipelineID} +
    + )) + } + return ( -
    + <> { - pipelines && pipelines.map((pipeline, index) => ( -
    - - {pipeline.pipelineID} -
    - )) + } -
    + { + remainingPipelinesIDs.length > 0 && + } title="Remaining pipelines" placement="rightTop" trigger="hover"> + {`... and ${remainingPipelinesIDs.length} more pipelines`} + + } + ); } }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less index 15d68dfc8600..a2f4c088c566 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less @@ -24,4 +24,19 @@ .ant-pagination-disabled, .ant-pagination-disabled:hover, .ant-pagination-disabled:focus { color: rgba(0, 0, 0, 0.65); cursor: pointer !important; - } \ No newline at end of file + } + +.multi-select-container { + padding-left: 5px; + margin-right: 5px; + display: inline-block; + min-width: 200px; + z-index: 99; +} + +.limit-block { + font-size: 14px; + font-weight: normal; + display: inline-block; + margin-left: 20px; +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx index 1846592b8995..fdd3dc85e19e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx @@ -24,17 +24,14 @@ import moment from 'moment'; import { showDataFetchError, byteToSize } from 'utils/common'; import './om.less'; import { ColumnSearch } from 'utils/columnSearch'; -import { Link } from 'react-router-dom'; import { AxiosGetHelper, cancelRequests } from 'utils/axiosRequestHelper'; +import {IOption} from "../../../components/multiSelect/multiSelect"; +import {ActionMeta, ValueType} from "react-select"; +import CreatableSelect from "react-select/creatable"; const size = filesize.partial({ standard: 'iec' }); const { TabPane } = Tabs; -//Previous Key Need to store respective Lastkey of each API -let mismatchPrevKeyList = [0]; -let openPrevKeyList =[""]; -let keysPendingPrevList =[""]; -let deletedKeysPrevList =[0]; let keysPendingExpanded: any = []; interface IContainerResponse { containerId: number; @@ -166,7 +163,7 @@ const MISMATCH_TAB_COLUMNS = [ const OPEN_KEY_TAB_COLUMNS = [ { - title: 'Key', + title: 'Key Name', dataIndex: 'path', key: 'path', isSearchable: true @@ -277,6 +274,35 @@ const DELETED_TAB_COLUMNS = [ } ]; +const PENDINGDIR_TAB_COLUMNS = [ + { + title: 'Directory Name', + dataIndex: 'path', + key: 'path' + }, + { + title: 'In state since', + dataIndex: 'inStateSince', + key: 'inStateSince', + render: (inStateSince: number) => { + return inStateSince > 0 ? 
moment(inStateSince).format('ll LTS') : 'NA'; + } + }, + { + title: 'Path', + dataIndex: 'key', + key: 'key', + isSearchable: true, + width: '450px' + }, + { + title: 'Data Size', + dataIndex: 'size', + key: 'size', + render: (dataSize :any) => dataSize = byteToSize(dataSize,1) + } +]; + interface IExpandedRow { [key: number]: IExpandedRowState; } @@ -295,24 +321,29 @@ interface IOmdbInsightsState { pendingDeleteKeyDataSource: any[]; expandedRowData: IExpandedRow; deletedContainerKeysDataSource: []; - prevKeyMismatch: number; mismatchMissingState: any; - prevKeyOpen: string; - prevKeyDeleted: number; - prevKeyDeletePending: string; + pendingDeleteDirDataSource: any[]; activeTab: string; - DEFAULT_LIMIT: number, - nextClickable: boolean; includeFso: boolean; includeNonFso: boolean; - prevClickable: boolean + selectedLimit: IOption; } +const LIMIT_OPTIONS: IOption[] = [ + {label: "1000", value: "1000"}, + {label: "5000", value: "5000"}, + {label: "10000", value: "10000"}, + {label: "20000", value: "20000"} +] + +const INITIAL_LIMIT_OPTION = LIMIT_OPTIONS[0] + let cancelMismatchedEndpointSignal: AbortController; let cancelOpenKeysSignal: AbortController; let cancelDeletePendingSignal: AbortController; let cancelDeletedKeysSignal: AbortController; let cancelRowExpandSignal: AbortController; +let cancelDeletedPendingDirSignal: AbortController; export class Om extends React.Component, IOmdbInsightsState> { @@ -326,18 +357,13 @@ export class Om extends React.Component, IOmdbInsightsSta openKeysDataSource: [], pendingDeleteKeyDataSource: [], deletedContainerKeysDataSource: [], - prevKeyMismatch: 0, + pendingDeleteDirDataSource:[], mismatchMissingState: 'SCM', - prevKeyOpen: "", - prevKeyDeletePending: "", - prevKeyDeleted: 0, expandedRowData: {}, activeTab: props.location.state ? 
props.location.state.activeTab : '1', - DEFAULT_LIMIT: 10, - nextClickable: true, includeFso: true, includeNonFso: false, - prevClickable: false + selectedLimit: INITIAL_LIMIT_OPTION }; } @@ -389,12 +415,10 @@ export class Om extends React.Component, IOmdbInsightsSta handleExistsAtChange = (e: any) => { console.log("handleExistsAtChange", e.key); if (e.key === 'OM') { - mismatchPrevKeyList = [0]; - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, 0, 'SCM'); + this.fetchMismatchContainers('SCM'); } else { - mismatchPrevKeyList = [0]; - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, 0, 'OM'); + this.fetchMismatchContainers('OM'); } }; @@ -439,26 +463,30 @@ export class Om extends React.Component, IOmdbInsightsSta handlefsoNonfsoMenuChange = (e: any) => { if (e.key === 'fso') { - openPrevKeyList =[""]; - this.fetchOpenKeys(true, false, this.state.DEFAULT_LIMIT, ""); + this.fetchOpenKeys(true, false); } else { - openPrevKeyList = [""]; - this.fetchOpenKeys(false, true, this.state.DEFAULT_LIMIT, ""); + this.fetchOpenKeys(false, true); } }; - componentDidMount(): void { + _loadData = () => { if (this.state.activeTab === '1') { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); + this.fetchMismatchContainers(this.state.mismatchMissingState); } else if (this.state.activeTab === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); + this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso); } else if (this.state.activeTab === '3') { keysPendingExpanded =[]; - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); + this.fetchDeletePendingKeys(); } else if (this.state.activeTab === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); + this.fetchDeletedKeys(); + } else if (this.state.activeTab === '5') { + this.fetchDeletePendingDir(); } + } + + componentDidMount(): void { + this._loadData(); }; componentWillUnmount(): void { @@ -467,13 +495,12 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletePendingSignal && cancelDeletePendingSignal.abort(); cancelDeletedKeysSignal && cancelDeletedKeysSignal.abort(); cancelRowExpandSignal && cancelRowExpandSignal.abort(); + cancelDeletedPendingDirSignal && cancelDeletedPendingDirSignal.abort(); } - fetchMismatchContainers = (limit: number, prevKeyMismatch: number, mismatchMissingState: any) => { + fetchMismatchContainers = (mismatchMissingState: any) => { this.setState({ loading: true, - nextClickable: true, - prevClickable: true, mismatchMissingState }); @@ -483,39 +510,21 @@ export class Om extends React.Component, IOmdbInsightsSta cancelOpenKeysSignal, cancelDeletePendingSignal, cancelDeletedKeysSignal, - cancelRowExpandSignal + cancelRowExpandSignal, + cancelDeletedPendingDirSignal ]); - const mismatchEndpoint = `/api/v1/containers/mismatch?limit=${limit}&prevKey=${prevKeyMismatch}&missingIn=${mismatchMissingState}` + const mismatchEndpoint = `/api/v1/containers/mismatch?limit=${this.state.selectedLimit.value}&missingIn=${mismatchMissingState}` const { request, controller } = AxiosGetHelper(mismatchEndpoint, cancelMismatchedEndpointSignal) cancelMismatchedEndpointSignal = controller; request.then(mismatchContainersResponse => { const mismatchContainers: IContainerResponse[] = mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.containerDiscrepancyInfo; - if 
(mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.lastKey === null) { - //No Further Records may be last record - mismatchPrevKeyList = [0]; - this.setState({ - loading: false, - nextClickable: false, - mismatchDataSource: mismatchContainers, - expandedRowData: {}, - }) - } - else { - if (this.state.prevKeyMismatch === 0 ){ - this.setState({ - prevClickable: false - }) - } - if (mismatchPrevKeyList.includes(mismatchContainersResponse.data.lastKey) === false) { - mismatchPrevKeyList.push(mismatchContainersResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyMismatch: mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.lastKey, - mismatchDataSource: mismatchContainers, - }); - } + + this.setState({ + loading: false, + mismatchDataSource: mismatchContainers + }); + }).catch(error => { this.setState({ loading: false, @@ -524,11 +533,9 @@ export class Om extends React.Component, IOmdbInsightsSta }); }; - fetchOpenKeys = (includeFso: boolean, includeNonFso: boolean, limit: number, prevKeyOpen: string) => { + fetchOpenKeys = (includeFso: boolean, includeNonFso: boolean) => { this.setState({ loading: true, - nextClickable: true, - prevClickable: true, includeFso, includeNonFso }); @@ -539,16 +546,11 @@ export class Om extends React.Component, IOmdbInsightsSta cancelOpenKeysSignal, cancelDeletePendingSignal, cancelDeletedKeysSignal, - cancelRowExpandSignal + cancelRowExpandSignal, + cancelDeletedPendingDirSignal ]); - let openKeysEndpoint; - if (prevKeyOpen === "") { - openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${limit}`; - } - else { - openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${limit}&prevKey=${prevKeyOpen}`; - } + let openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(openKeysEndpoint, cancelOpenKeysSignal) cancelOpenKeysSignal = controller @@ -560,31 +562,11 @@ export class Om extends React.Component, IOmdbInsightsSta openKeys[key] && openKeys[key].map((item: any) => (allopenKeysResponse.push({ ...item, type: key }))); } } + this.setState({ + loading: false, + openKeysDataSource: allopenKeysResponse, + }) - if (openKeysResponse && openKeysResponse.data && openKeysResponse.data.lastKey === "") { - //last key of api is null may be last record no further records - openPrevKeyList = [""]; - this.setState({ - loading: false, - nextClickable: false, - openKeysDataSource: allopenKeysResponse - }) - } - else { - if (this.state.prevKeyOpen === "" ){ - this.setState({ - prevClickable: false - }) - } - if (openPrevKeyList.includes(openKeysResponse.data.lastKey) === false) { - openPrevKeyList.push(openKeysResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyOpen: openKeysResponse && openKeysResponse.data && openKeysResponse.data.lastKey, - openKeysDataSource: allopenKeysResponse, - }) - }; }).catch(error => { this.setState({ loading: false @@ -594,11 +576,9 @@ export class Om extends React.Component, IOmdbInsightsSta }; - fetchDeletePendingKeys = (limit: number, prevKeyDeletePending: string) => { + fetchDeletePendingKeys = () => { this.setState({ - loading: true, - nextClickable: true, - prevClickable :true + loading: true }); //Cancel any previous pending request @@ -607,17 +587,12 @@ export class Om extends React.Component, 
IOmdbInsightsSta cancelOpenKeysSignal, cancelDeletePendingSignal, cancelDeletedKeysSignal, - cancelRowExpandSignal + cancelRowExpandSignal, + cancelDeletedPendingDirSignal ]); keysPendingExpanded =[]; - let deletePendingKeysEndpoint; - if (prevKeyDeletePending === "" || prevKeyDeletePending === undefined ) { - deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${limit}`; - } - else { - deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${limit}&prevKey=${prevKeyDeletePending}`; - } + let deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(deletePendingKeysEndpoint, cancelDeletePendingSignal); cancelDeletePendingSignal = controller; @@ -646,30 +621,11 @@ export class Om extends React.Component, IOmdbInsightsSta } }); - if (deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.lastKey === "") { - //last key of api is empty may be last record no further records - keysPendingPrevList =[""]; - this.setState({ - loading: false, - nextClickable: false, - pendingDeleteKeyDataSource: deletedKeyInfoData - }) - } - else { - if (this.state.prevKeyDeletePending === "" ||this.state.prevKeyDeletePending === undefined ){ - this.setState({ - prevClickable: false - }) - } - if (keysPendingPrevList.includes(deletePendingKeysResponse.data.lastKey) === false) { - keysPendingPrevList.push(deletePendingKeysResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyDeletePending: deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.lastKey, - pendingDeleteKeyDataSource: deletedKeyInfoData - }); - } + this.setState({ + loading: false, + pendingDeleteKeyDataSource: deletedKeyInfoData + }); + }).catch(error => { this.setState({ loading: false, @@ -722,11 +678,9 @@ export class Om extends React.Component, IOmdbInsightsSta ); } - fetchDeletedKeys = (limit: number, prevKeyDeleted: number) => { + fetchDeletedKeys = () => { this.setState({ - loading: true, - nextClickable: true, - prevClickable: true + loading: true }); //Cancel any previous pending request @@ -735,54 +689,64 @@ export class Om extends React.Component, IOmdbInsightsSta cancelOpenKeysSignal, cancelDeletePendingSignal, cancelDeletedKeysSignal, - cancelRowExpandSignal + cancelRowExpandSignal, + cancelDeletedPendingDirSignal ]); - const deletedKeysEndpoint = `/api/v1/containers/mismatch/deleted?limit=${limit}&prevKey=${prevKeyDeleted}`; + const deletedKeysEndpoint = `/api/v1/containers/mismatch/deleted?limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(deletedKeysEndpoint, cancelDeletedKeysSignal); cancelDeletedKeysSignal = controller request.then(deletedKeysResponse => { let deletedContainerKeys = []; deletedContainerKeys = deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.containers; - if (deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.lastKey === null) { - // no more further records last key - deletedKeysPrevList = [0]; - this.setState({ - loading: false, - nextClickable: false, - deletedContainerKeysDataSource: deletedContainerKeys, - expandedRowData: {}, - }) - } - else { - if (this.state.prevKeyDeleted === 0 ){ - this.setState({ - prevClickable: false - }) - } - if (deletedKeysPrevList.includes(deletedKeysResponse.data.lastKey) === false) { - deletedKeysPrevList.push(deletedKeysResponse.data.lastKey); - } + this.setState({ + loading: false, + 
deletedContainerKeysDataSource: deletedContainerKeys + }) + }).catch(error => { + this.setState({ + loading: false + }); + showDataFetchError(error.toString()); + }); + }; + + // Pending Delete Directories + fetchDeletePendingDir = () => { + this.setState({ + loading: true + }); + + //Cancel any previous pending request + cancelRequests([ + cancelMismatchedEndpointSignal, + cancelOpenKeysSignal, + cancelDeletePendingSignal, + cancelDeletedKeysSignal, + cancelRowExpandSignal, + cancelDeletedPendingDirSignal + ]); + + const DELETE_PENDING_DIR_ENDPOINT = `/api/v1/keys/deletePending/dirs?limit=${this.state.selectedLimit.value}`; + const { request, controller } = AxiosGetHelper(DELETE_PENDING_DIR_ENDPOINT, cancelDeletedPendingDirSignal); + cancelDeletedPendingDirSignal = controller + request.then(deletePendingDirResponse => { + let deletedDirInfo = []; + deletedDirInfo = deletePendingDirResponse && deletePendingDirResponse.data && deletePendingDirResponse.data.deletedDirInfo; this.setState({ loading: false, - prevKeyDeleted: deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.lastKey, - deletedContainerKeysDataSource: deletedContainerKeys - }) - }; + pendingDeleteDirDataSource: deletedDirInfo + }); }).catch(error => { this.setState({ - loading: false + loading: false, }); showDataFetchError(error.toString()); }); }; + changeTab = (activeKey: any) => { - //when changing tab make empty all datasets and prevkey and deafult filtering to intial values also cancel all pending requests - mismatchPrevKeyList = [0]; - openPrevKeyList =[""]; - keysPendingPrevList =[""]; - deletedKeysPrevList =[0]; this.setState({ activeTab: activeKey, mismatchDataSource: [], @@ -790,121 +754,29 @@ export class Om extends React.Component, IOmdbInsightsSta pendingDeleteKeyDataSource: [], deletedContainerKeysDataSource: [], expandedRowData: {}, - prevKeyOpen: "", - prevKeyDeletePending: "", - prevKeyDeleted: 0, - prevKeyMismatch: 0, mismatchMissingState: 'SCM', includeFso: true, includeNonFso: false, - DEFAULT_LIMIT: 10, - + selectedLimit: INITIAL_LIMIT_OPTION }, () => { if (activeKey === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); + this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso); } else if (activeKey === '3') { keysPendingExpanded =[]; - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); + this.fetchDeletePendingKeys(); } else if (activeKey === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); + this.fetchDeletedKeys(); + } else if (activeKey === '5') { + this.fetchDeletePendingDir (); } else { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); + this.fetchMismatchContainers(this.state.mismatchMissingState); } }) }; - fetchPreviousRecords = () => { - // to fetch previous call stored all prevkey in array and fetching in respective tabs - if (this.state.activeTab === '2') { - this.setState({ - prevKeyOpen: openPrevKeyList[openPrevKeyList.indexOf(this.state.prevKeyOpen)-2] - }, () => { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT,this.state.prevKeyOpen); - }) - } else if (this.state.activeTab === '3') { - this.setState({ - prevKeyDeletePending: keysPendingPrevList[keysPendingPrevList.indexOf(this.state.prevKeyDeletePending)-2] - }, () => { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, 
this.state.prevKeyDeletePending); - }) - } else if (this.state.activeTab === '4') { - this.setState({ - prevKeyDeleted: deletedKeysPrevList[deletedKeysPrevList.indexOf(this.state.prevKeyDeleted)-2] - }, () => { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT,this.state.prevKeyDeleted); - }) - } - else { - this.setState({ - prevKeyMismatch: mismatchPrevKeyList[mismatchPrevKeyList.indexOf(this.state.prevKeyMismatch)-2] - }, () => { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT,this.state.prevKeyMismatch, this.state.mismatchMissingState); - }) - } - }; - - fetchNextRecords = () => { - // To Call API for Page Level for each page fetch next records - if (this.state.activeTab === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); - } else if (this.state.activeTab === '3') { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); - } else if (this.state.activeTab === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); - } - else { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); - } - }; - - itemRender = (_: any, type: string, originalElement: any) => { - if (type === 'prev') { - return
    {this.state.prevClickable ? Prev: No Records}
    ; - } - if (type === 'next') { - return
    {this.state.nextClickable ? {'>>'} : No More Further Records}
    ; - } - return originalElement; - }; - onShowSizeChange = (current: number, pageSize: number) => { - if (this.state.activeTab === '2') { - //open keys - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyOpen: openPrevKeyList[openPrevKeyList.indexOf(this.state.prevKeyOpen)-1] - }, () => { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT,this.state.prevKeyOpen); - }); - } - else if (this.state.activeTab === '3') { - //keys pending for deletion - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyDeletePending: keysPendingPrevList[keysPendingPrevList.indexOf(this.state.prevKeyDeletePending)-1] - }, () => { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); - }) - } - else if (this.state.activeTab === '4') { - //deleted container keys - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyDeleted: deletedKeysPrevList[deletedKeysPrevList.indexOf(this.state.prevKeyDeleted)-1] - }, () => { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); - }) - } - else { - // active tab 1 for mismatch - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyMismatch: mismatchPrevKeyList[mismatchPrevKeyList.indexOf(this.state.prevKeyMismatch)-1] - }, () => { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT,this.state.prevKeyMismatch, this.state.mismatchMissingState); - }); - } + console.log(current, pageSize); }; onRowExpandClick = (expanded: boolean, record: IContainerResponse) => { @@ -1039,16 +911,50 @@ export class Om extends React.Component, IOmdbInsightsSta }, []) }; + searchDirPendingColumn = () => { + return PENDINGDIR_TAB_COLUMNS.reduce((filtered, column) => { + if (column.isSearchable) { + const newColumn = { + ...column, + ...new ColumnSearch(column).getColumnSearchProps(column.dataIndex) + }; + filtered.push(newColumn); + } else { + filtered.push(column); + } + return filtered; + }, []) + }; + + _handleLimitChange = (selected: ValueType, _action: ActionMeta) => { + const selectedLimit = (selected as IOption) + this.setState({ + selectedLimit + }, this._loadData); + } + + _onCreateOption = (created: string) => { + // Check that it's a numeric and non-negative + if (parseInt(created)) { + const createdOption: IOption = { + label: created, + value: created + } + this.setState({ + selectedLimit: createdOption + }, this._loadData); + } else { + console.log('Not a valid option') + } + } + render() { - const { mismatchDataSource, loading, openKeysDataSource, pendingDeleteKeyDataSource, deletedContainerKeysDataSource } = this.state; + const { mismatchDataSource, loading, openKeysDataSource, pendingDeleteKeyDataSource, deletedContainerKeysDataSource, pendingDeleteDirDataSource, selectedLimit } = this.state; const paginationConfig: PaginationConfig = { - pageSize:this.state.DEFAULT_LIMIT, - defaultPageSize: this.state.DEFAULT_LIMIT, - pageSizeOptions: ['10', '20', '30', '50'], + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total}`, showSizeChanger: true, onShowSizeChange: this.onShowSizeChange, - itemRender: this.itemRender }; const generateMismatchTable = (dataSource: any) => { @@ -1065,7 +971,7 @@ export class Om extends React.Component, IOmdbInsightsSta return } @@ -1090,14 +996,43 @@ export class Om extends React.Component, IOmdbInsightsSta /> } - + const generateDirPendingTable = (dataSource: any) => { + return
    + } return ( -
    +
    OM DB Insights
    +
    + { + // Only number will be accepted + return !isNaN(parseInt(input)) + }} + options={LIMIT_OPTIONS} + hideSelectedOptions={false} + value={selectedLimit} + createOptionPosition='last' + formatCreateLabel={(input) => { + return `new limit... ${input}` + }} + /> Limit +
    {generateMismatchTable(mismatchDataSource)} @@ -1123,6 +1058,15 @@ export class Om extends React.Component, IOmdbInsightsSta }> {generateDeletedKeysTable(deletedContainerKeysDataSource)} + Directories Pending for Deletion   + + + + + }> + {generateDirPendingTable(pendingDeleteDirDataSource)} +
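Editor's note on the om.tsx hunks above: the change drops the prevKey-based next/previous paging (the prevKey* state, itemRender, and fetchPrevious/NextRecords helpers) and instead refetches each insights tab with a single user-selectable record limit coming from the CreatableSelect bound to selectedLimit. A minimal TypeScript sketch of that pattern is shown below; it is illustrative only, and the names LimitOption, parseLimit, and openKeysUrl are assumptions, not part of the actual Recon component.

```typescript
// Illustrative sketch: validate a user-entered limit and build the
// limit-only endpoint URL used once prevKey pagination is removed.
interface LimitOption {
  label: string;
  value: string;
}

const LIMIT_OPTIONS: LimitOption[] = [
  { label: '1000', value: '1000' },
  { label: '5000', value: '5000' },
  { label: '10000', value: '10000' },
  { label: '20000', value: '20000' }
];

// Accept only positive integers for a custom limit; otherwise keep the fallback.
function parseLimit(input: string, fallback: LimitOption): LimitOption {
  const parsed = Number.parseInt(input, 10);
  return Number.isInteger(parsed) && parsed > 0
    ? { label: String(parsed), value: String(parsed) }
    : fallback;
}

// Each fetch sends only the selected limit; no prevKey bookkeeping is needed.
function openKeysUrl(limit: LimitOption, includeFso: boolean, includeNonFso: boolean): string {
  return `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${limit.value}`;
}

// Example: a custom "2500" entry is accepted, "abc" falls back to the default 1000.
console.log(openKeysUrl(parseLimit('2500', LIMIT_OPTIONS[0]), true, false));
console.log(openKeysUrl(parseLimit('abc', LIMIT_OPTIONS[0]), true, false));
```

The design trade-off recorded in the diff is that each tab now loads at most the selected number of rows in one request and relies on the antd table's client-side pagination, rather than walking the API page by page with prevKey.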
    diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index 42d69e030f31..a9ed342faad4 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -65,6 +65,7 @@ */ public final class OMMetadataManagerTestUtils { + private static OzoneConfiguration configuration; private OMMetadataManagerTestUtils() { } @@ -129,8 +130,9 @@ public static ReconOMMetadataManager getTestReconOmMetadataManager( DBCheckpoint checkpoint = omMetadataManager.getStore() .getCheckpoint(true); assertNotNull(checkpoint.getCheckpointLocation()); - - OzoneConfiguration configuration = new OzoneConfiguration(); + if (configuration == null) { + configuration = new OzoneConfiguration(); + } configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir .getAbsolutePath()); @@ -397,23 +399,31 @@ public static void writeDirToOm(OMMetadataManager omMetadataManager, .build()); } + @SuppressWarnings("parameternumber") public static void writeDeletedDirToOm(OMMetadataManager omMetadataManager, String bucketName, String volumeName, String dirName, long parentObjectId, long bucketObjectId, - long volumeObjectId) + long volumeObjectId, + long objectId) throws IOException { - // DB key in DeletedDirectoryTable => "volumeID/bucketID/parentId/dirName" - String omKey = omMetadataManager.getOzonePathKey(volumeObjectId, - bucketObjectId, parentObjectId, dirName); + // DB key in DeletedDirectoryTable => + // "volumeID/bucketID/parentId/dirName/dirObjectId" + + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeObjectId, + bucketObjectId, parentObjectId, dirName); + String ozoneDeleteKey = omMetadataManager.getOzoneDeletePathKey( + objectId, ozoneDbKey); + - omMetadataManager.getDeletedDirTable().put(omKey, + omMetadataManager.getDeletedDirTable().put(ozoneDeleteKey, new OmKeyInfo.Builder() .setBucketName(bucketName) .setVolumeName(volumeName) .setKeyName(dirName) + .setObjectID(objectId) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .build()); } @@ -493,4 +503,14 @@ public static OmKeyLocationInfo getOmKeyLocationInfo(BlockID blockID, public static BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } + + public static OzoneConfiguration getConfiguration() { + return configuration; + } + + public static void setConfiguration( + OzoneConfiguration configuration) { + OMMetadataManagerTestUtils.configuration = configuration; + } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java index f49826e67d81..d5962c0c407d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java @@ -45,7 +45,11 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import 
org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -53,6 +57,7 @@ * Test Recon Utility methods. */ public class TestReconUtils { + private static PipelineID randomPipelineID = PipelineID.randomId(); @TempDir private Path temporaryFolder; @@ -234,4 +239,24 @@ private static int oldNextClosestPowerIndexOfTwo(long dataSize) { } return index; } + + private static ContainerInfo.Builder getDefaultContainerInfoBuilder( + final HddsProtos.LifeCycleState state) { + return new ContainerInfo.Builder() + .setContainerID(RandomUtils.nextLong()) + .setReplicationConfig( + RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE)) + .setState(state) + .setSequenceId(10000L) + .setOwner("TEST"); + } + + + public static ContainerInfo getContainer( + final HddsProtos.LifeCycleState state) { + return getDefaultContainerInfoBuilder(state) + .setPipelineID(randomPipelineID) + .build(); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 854ac74bd390..82c7c1b5bef0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -885,6 +885,7 @@ public void testUnhealthyContainers() throws IOException, TimeoutException { public void testUnhealthyContainersFilteredResponse() throws IOException, TimeoutException { String missing = UnHealthyContainerStates.MISSING.toString(); + String emptyMissing = UnHealthyContainerStates.EMPTY_MISSING.toString(); Response response = containerEndpoint .getUnhealthyContainers(missing, 1000, 1); @@ -904,6 +905,7 @@ public void testUnhealthyContainersFilteredResponse() uuid3 = newDatanode("host3", "127.0.0.3"); uuid4 = newDatanode("host4", "127.0.0.4"); createUnhealthyRecords(5, 4, 3, 2); + createEmptyMissingUnhealthyRecords(2); response = containerEndpoint.getUnhealthyContainers(missing, 1000, 1); @@ -926,6 +928,13 @@ public void testUnhealthyContainersFilteredResponse() for (UnhealthyContainerMetadata r : records) { assertEquals(missing, r.getContainerState()); } + + Response filteredEmptyMissingResponse = containerEndpoint + .getUnhealthyContainers(emptyMissing, 1000, 1); + responseObject = (UnhealthyContainersResponse) filteredEmptyMissingResponse.getEntity(); + records = responseObject.getContainers(); + // Assert for zero empty missing containers. 
+ assertEquals(0, records.size()); } @Test @@ -1026,6 +1035,14 @@ UUID newDatanode(String hostName, String ipAddress) throws IOException { return uuid; } + private void createEmptyMissingUnhealthyRecords(int emptyMissing) { + int cid = 0; + for (int i = 0; i < emptyMissing; i++) { + createUnhealthyRecord(++cid, UnHealthyContainerStates.EMPTY_MISSING.toString(), + 3, 3, 0, null); + } + } + private void createUnhealthyRecords(int missing, int overRep, int underRep, int misRep) { int cid = 0; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 05d9927d6c93..2c3439cd19b6 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -74,6 +74,7 @@ import org.apache.hadoop.ozone.recon.api.types.DatanodesResponse; import org.apache.hadoop.ozone.recon.api.types.PipelineMetadata; import org.apache.hadoop.ozone.recon.api.types.PipelinesResponse; +import org.apache.hadoop.ozone.recon.api.types.RemoveDataNodesResponseWrapper; import org.apache.hadoop.ozone.recon.api.types.VolumeObjectDBInfo; import org.apache.hadoop.ozone.recon.api.types.VolumesResponse; import org.apache.hadoop.ozone.recon.common.CommonUtils; @@ -101,6 +102,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mockito; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto; @@ -113,7 +115,9 @@ import static org.apache.hadoop.ozone.recon.spi.impl.PrometheusServiceProviderImpl.PROMETHEUS_INSTANT_QUERY_API; import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; @@ -140,8 +144,11 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicInteger; @@ -168,25 +175,36 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private String pipelineId; private DatanodeDetails datanodeDetails; private DatanodeDetails datanodeDetails2; + private DatanodeDetails datanodeDetails3; + private DatanodeDetails datanodeDetails4; private long containerId = 1L; private ContainerReportsProto containerReportsProto; private ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto; + private ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto3; private Pipeline pipeline; private FileCountBySizeDao fileCountBySizeDao; private ContainerCountBySizeDao containerCountBySizeDao; private DSLContext dslContext; private static final String HOST1 = "host1.datanode"; private static final String HOST2 = "host2.datanode"; + private static final String HOST3 = "host3.datanode"; 
+ private static final String HOST4 = "host4.datanode"; private static final String IP1 = "1.1.1.1"; private static final String IP2 = "2.2.2.2"; + private static final String IP3 = "3.3.3.3"; + private static final String IP4 = "4.4.4.4"; private static final String PROMETHEUS_TEST_RESPONSE_FILE = "prometheus-test-response.txt"; private ReconUtils reconUtilsMock; + private StorageContainerLocationProtocol mockScmClient; private ContainerHealthSchemaManager containerHealthSchemaManager; private CommonUtils commonUtils; private PipelineManager pipelineManager; private ReconPipelineManager reconPipelineManager; + private List nodes = getNodeDetails(2); + private Map> containerOnDecom = getContainersOnDecomNodes(); + private ArrayList metrics = getMetrics(); public TestEndpoints() { super(); @@ -199,10 +217,16 @@ private void initializeInjector() throws Exception { Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile()); datanodeDetails = randomDatanodeDetails(); datanodeDetails2 = randomDatanodeDetails(); + datanodeDetails3 = randomDatanodeDetails(); + datanodeDetails4 = randomDatanodeDetails(); datanodeDetails.setHostName(HOST1); datanodeDetails.setIpAddress(IP1); datanodeDetails2.setHostName(HOST2); datanodeDetails2.setIpAddress(IP2); + datanodeDetails3.setHostName(HOST3); + datanodeDetails3.setIpAddress(IP3); + datanodeDetails4.setHostName(HOST4); + datanodeDetails4.setIpAddress(IP4); pipeline = getRandomPipeline(datanodeDetails); pipelineId = pipeline.getId().getId().toString(); @@ -218,8 +242,8 @@ private void initializeInjector() throws Exception { ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(containerInfo, pipeline); - StorageContainerLocationProtocol mockScmClient = mock( - StorageContainerLocationProtocol.class); + mockScmClient = mock( + StorageContainerLocationProtocol.class, Mockito.RETURNS_DEEP_STUBS); StorageContainerServiceProvider mockScmServiceProvider = mock( StorageContainerServiceProviderImpl.class); when(mockScmServiceProvider.getPipeline( @@ -288,8 +312,9 @@ private void initializeInjector() throws Exception { utilizationSchemaDefinition); fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition); - omTableInsightTask = new OmTableInsightTask( - globalStatsDao, sqlConfiguration, reconOMMetadataManager); + omTableInsightTask = + new OmTableInsightTask(globalStatsDao, sqlConfiguration, + reconOMMetadataManager); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); clusterStateEndpoint = @@ -405,7 +430,38 @@ public void setUp() throws Exception { .addStorageReport(storageReportProto4).build(); LayoutVersionProto layoutInfo = defaultLayoutVersionProto(); - try { + DatanodeDetailsProto datanodeDetailsProto3 = + DatanodeDetailsProto.newBuilder() + .setHostName(HOST3) + .setUuid(datanodeDetails3.getUuid().toString()) + .setIpAddress(IP3) + .build(); + extendedDatanodeDetailsProto3 = + HddsProtos.ExtendedDatanodeDetailsProto.newBuilder() + .setDatanodeDetails(datanodeDetailsProto3) + .setVersion("0.6.0") + .setSetupTime(1596347628802L) + .setBuildDate("2020-08-01T08:50Z") + .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") + .build(); + StorageReportProto storageReportProto5 = + StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK) + .setStorageLocation("/disk1").setScmUsed(20000).setRemaining(7800) + .setCapacity(50000) + .setStorageUuid(UUID.randomUUID().toString()) + .setFailed(false).build(); + StorageReportProto storageReportProto6 = + 
StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK) + .setStorageLocation("/disk2").setScmUsed(60000).setRemaining(10000) + .setCapacity(80000) + .setStorageUuid(UUID.randomUUID().toString()) + .setFailed(false).build(); + NodeReportProto nodeReportProto3 = + NodeReportProto.newBuilder() + .addStorageReport(storageReportProto5) + .addStorageReport(storageReportProto6).build(); + + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() .register(extendedDatanodeDetailsProto, nodeReportProto, containerReportsProto, pipelineReportsProto, layoutInfo); @@ -414,11 +470,14 @@ public void setUp() throws Exception { ContainerReportsProto.newBuilder().build(), PipelineReportsProto.newBuilder().build(), defaultLayoutVersionProto()); + reconScm.getDatanodeProtocolServer() + .register(extendedDatanodeDetailsProto3, nodeReportProto3, + ContainerReportsProto.newBuilder().build(), + PipelineReportsProto.newBuilder().build(), + defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); // Write Data to OM // A sample volume (sampleVol) and a bucket (bucketOne) is already created // in AbstractOMMetadataManagerTest. @@ -435,14 +494,12 @@ public void setUp() throws Exception { .addOzoneAcls(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "TestUser2", - IAccessAuthorizer.ACLType.WRITE, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE )) .addOzoneAcls(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "TestUser2", - IAccessAuthorizer.ACLType.READ, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.READ )) .build(); reconOMMetadataManager.getVolumeTable().put(volumeKey, args); @@ -453,8 +510,7 @@ public void setUp() throws Exception { .addAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.GROUP, "TestGroup2", - IAccessAuthorizer.ACLType.WRITE, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE )) .setQuotaInBytes(OzoneConsts.GB) .setUsedBytes(OzoneConsts.MB) @@ -477,8 +533,7 @@ public void setUp() throws Exception { .addAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.GROUP, "TestGroup2", - IAccessAuthorizer.ACLType.READ, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.READ )) .setQuotaInBytes(OzoneConsts.GB) .setUsedBytes(100 * OzoneConsts.MB) @@ -515,11 +570,11 @@ public void setUp() throws Exception { // Populate the deletedDirectories table in OM DB writeDeletedDirToOm(reconOMMetadataManager, "Bucket1", "Volume1", "dir1", - 3L, 2L, 1L); + 3L, 2L, 1L, 23L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket2", "Volume2", "dir2", - 6L, 5L, 4L); + 6L, 5L, 4L, 22L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket3", "Volume3", "dir3", - 9L, 8L, 7L); + 9L, 8L, 7L, 21L); // Truncate global stats table before running each test dslContext.truncate(GLOBAL_STATS); @@ -561,6 +616,17 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata) assertEquals(80000, datanodeMetadata.getDatanodeStorageReport().getUsed()); + assertEquals(0, datanodeMetadata.getPipelines().size()); + assertEquals(0, datanodeMetadata.getLeaderCount()); + break; + case HOST3: + assertEquals(130000, + datanodeMetadata.getDatanodeStorageReport().getCapacity()); + assertEquals(17800, + datanodeMetadata.getDatanodeStorageReport().getRemaining()); + assertEquals(80000, + datanodeMetadata.getDatanodeStorageReport().getUsed()); + assertEquals(0, 
datanodeMetadata.getPipelines().size()); assertEquals(0, datanodeMetadata.getLeaderCount()); break; @@ -577,8 +643,8 @@ public void testGetDatanodes() throws Exception { Response response = nodeEndpoint.getDatanodes(); DatanodesResponse datanodesResponse = (DatanodesResponse) response.getEntity(); - assertEquals(2, datanodesResponse.getTotalCount()); - assertEquals(2, datanodesResponse.getDatanodes().size()); + assertEquals(3, datanodesResponse.getTotalCount()); + assertEquals(3, datanodesResponse.getDatanodes().size()); datanodesResponse.getDatanodes().forEach(datanodeMetadata -> { try { @@ -594,7 +660,7 @@ public void testGetDatanodes() throws Exception { (DatanodesResponse) response1.getEntity(); DatanodeMetadata datanodeMetadata1 = datanodesResponse1.getDatanodes().stream().filter(datanodeMetadata -> - datanodeMetadata.getHostname().equals("host1.datanode")) + datanodeMetadata.getHostname().equals("host1.datanode")) .findFirst().orElse(null); return (datanodeMetadata1 != null && datanodeMetadata1.getContainers() == 1 && @@ -699,7 +765,7 @@ public void testGetMetricsResponse() throws Exception { byte[] fileBytes = FileUtils.readFileToByteArray( new File(classLoader.getResource(PROMETHEUS_TEST_RESPONSE_FILE) .getFile()) - ); + ); verify(outputStreamMock).write(fileBytes, 0, fileBytes.length); } @@ -713,8 +779,8 @@ public void testGetClusterState() throws Exception { assertEquals(0, clusterStateResponse.getVolumes()); assertEquals(0, clusterStateResponse.getBuckets()); assertEquals(0, clusterStateResponse.getKeys()); - assertEquals(2, clusterStateResponse.getTotalDatanodes()); - assertEquals(2, clusterStateResponse.getHealthyDatanodes()); + assertEquals(3, clusterStateResponse.getTotalDatanodes()); + assertEquals(3, clusterStateResponse.getHealthyDatanodes()); assertEquals(0, clusterStateResponse.getMissingContainers()); waitAndCheckConditionAfterHeartbeat(() -> { @@ -866,10 +932,12 @@ public void testGetContainerCounts() throws Exception { ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(LifeCycleState.OPEN); ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(LifeCycleState.OPEN); // Create a list of container info objects List containers = new ArrayList<>(); @@ -1179,4 +1247,219 @@ private void waitAndCheckConditionAfterHeartbeat(Callable check) private BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } + + @Test + public void testExplicitRemovalOfDecommissionedNode() throws Exception { + Response response = nodeEndpoint.getDatanodes(); + + DatanodesResponse datanodesResponse = + (DatanodesResponse) response.getEntity(); + assertEquals(3, datanodesResponse.getTotalCount()); + assertEquals(3, datanodesResponse.getDatanodes().size()); + + // Change Node3 OperationalState with NodeManager to NodeOperationalState.DECOMMISSIONED + final NodeManager nodeManager = reconScm.getScmNodeManager(); + final DatanodeDetails dnDetailsInternal = + nodeManager.getNodeByUuid(datanodeDetails3.getUuidString()); + // Backup existing state and sanity check + final NodeStatus nStatus = nodeManager.getNodeStatus(dnDetailsInternal); + final NodeOperationalState backupOpState = + 
dnDetailsInternal.getPersistedOpState(); + final long backupOpStateExpiry = + dnDetailsInternal.getPersistedOpStateExpiryEpochSec(); + assertEquals(backupOpState, nStatus.getOperationalState()); + assertEquals(backupOpStateExpiry, nStatus.getOpStateExpiryEpochSeconds()); + + dnDetailsInternal.setPersistedOpState(NodeOperationalState.DECOMMISSIONED); + dnDetailsInternal.setPersistedOpStateExpiryEpochSec(666L); + nodeManager.setNodeOperationalState(dnDetailsInternal, + NodeOperationalState.DECOMMISSIONED, 666L); + + Response removedDNResponse = nodeEndpoint.removeDatanodes(Arrays.asList(datanodeDetails3.getUuid().toString())); + + RemoveDataNodesResponseWrapper removeDataNodesResponseWrapper = + (RemoveDataNodesResponseWrapper) removedDNResponse.getEntity(); + DatanodesResponse errorDataNodes = removeDataNodesResponseWrapper.getDatanodesResponseMap().get("failedDatanodes"); + DatanodesResponse removedNodes = removeDataNodesResponseWrapper.getDatanodesResponseMap().get("removedDatanodes"); + assertEquals(1, removedNodes.getTotalCount()); + assertNull(errorDataNodes); + removedNodes.getDatanodes().forEach(datanodeMetadata -> { + assertEquals("host3.datanode", datanodeMetadata.getHostname()); + }); + } + + @Test + public void testExplicitRemovalOfInvalidStateNode() { + String dnUUID = datanodeDetails2.getUuid().toString(); + Response removedDNResponse = nodeEndpoint.removeDatanodes(Arrays.asList(dnUUID)); + RemoveDataNodesResponseWrapper removeDataNodesResponseWrapper = + (RemoveDataNodesResponseWrapper) removedDNResponse.getEntity(); + Map datanodesResponseMap = removeDataNodesResponseWrapper.getDatanodesResponseMap(); + assertFalse(datanodesResponseMap.isEmpty()); + DatanodesResponse failedDatanodes = datanodesResponseMap.get("failedDatanodes"); + Map failedNodeErrorResponseMap = failedDatanodes.getFailedNodeErrorResponseMap(); + assertFalse(failedNodeErrorResponseMap.isEmpty()); + String nodeError = failedNodeErrorResponseMap.get(dnUUID); + assertNotNull(nodeError); + assertEquals("DataNode should be in either DECOMMISSIONED operational state or DEAD node state.", nodeError); + assertEquals(Response.Status.OK.getStatusCode(), removedDNResponse.getStatus()); + } + + @Test + public void testExplicitRemovalOfNonExistingNode() { + String dnUUID = datanodeDetails4.getUuid().toString(); + Response removedDNResponse = nodeEndpoint.removeDatanodes(Arrays.asList(dnUUID)); + RemoveDataNodesResponseWrapper removeDataNodesResponseWrapper = + (RemoveDataNodesResponseWrapper) removedDNResponse.getEntity(); + DatanodesResponse notFoundDatanodes = removeDataNodesResponseWrapper.getDatanodesResponseMap() + .get("notFoundDatanodes"); + assertEquals(1, notFoundDatanodes.getTotalCount()); + Collection datanodes = notFoundDatanodes.getDatanodes(); + assertEquals(1, datanodes.size()); + DatanodeMetadata datanodeMetadata = datanodes.stream().findFirst().get(); + assertEquals(dnUUID, datanodeMetadata.getUuid()); + } + + @Test + public void testSuccessWhenDecommissionStatus() throws IOException { + when(mockScmClient.queryNode(any(), any(), any(), any(), any(Integer.class))).thenReturn( + nodes); // 2 nodes decommissioning + when(mockScmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(mockScmClient.getMetrics(any())).thenReturn(metrics.get(1)); + Response datanodesDecommissionInfo = nodeEndpoint.getDatanodesDecommissionInfo(); + Map responseMap = (Map) datanodesDecommissionInfo.getEntity(); + List> dnDecommissionInfo = + (List>) responseMap.get("DatanodesDecommissionInfo"); + 
DatanodeDetails datanode = (DatanodeDetails) dnDecommissionInfo.get(0).get("datanodeDetails"); + Map dnMetrics = (Map) dnDecommissionInfo.get(0).get("metrics"); + Map containers = (Map) dnDecommissionInfo.get(0).get("containers"); + assertNotNull(datanode); + assertNotNull(dnMetrics); + assertNotNull(containers); + assertFalse(datanode.getUuidString().isEmpty()); + assertFalse(((String) dnMetrics.get("decommissionStartTime")).isEmpty()); + assertEquals(1, dnMetrics.get("numOfUnclosedPipelines")); + assertEquals(3.0, dnMetrics.get("numOfUnderReplicatedContainers")); + assertEquals(3.0, dnMetrics.get("numOfUnclosedContainers")); + + assertEquals(3, ((List) containers.get("UnderReplicated")).size()); + assertEquals(3, ((List) containers.get("UnClosed")).size()); + } + + @Test + public void testSuccessWhenDecommissionStatusWithUUID() throws IOException { + when(mockScmClient.queryNode(any(), any(), any(), any(), any(Integer.class))).thenReturn( + getNodeDetailsForUuid("654c4b89-04ef-4015-8a3b-50d0fb0e1684")); // 1 nodes decommissioning + when(mockScmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + Response datanodesDecommissionInfo = + nodeEndpoint.getDecommissionInfoForDatanode("654c4b89-04ef-4015-8a3b-50d0fb0e1684", ""); + Map responseMap = (Map) datanodesDecommissionInfo.getEntity(); + List> dnDecommissionInfo = + (List>) responseMap.get("DatanodesDecommissionInfo"); + DatanodeDetails datanode = (DatanodeDetails) dnDecommissionInfo.get(0).get("datanodeDetails"); + Map containers = (Map) dnDecommissionInfo.get(0).get("containers"); + assertNotNull(datanode); + assertNotNull(containers); + assertFalse(datanode.getUuidString().isEmpty()); + assertEquals("654c4b89-04ef-4015-8a3b-50d0fb0e1684", datanode.getUuidString()); + + assertEquals(3, ((List) containers.get("UnderReplicated")).size()); + assertEquals(3, ((List) containers.get("UnClosed")).size()); + } + + private List getNodeDetailsForUuid(String uuid) { + List nodesList = new ArrayList<>(); + + HddsProtos.DatanodeDetailsProto.Builder dnd = + HddsProtos.DatanodeDetailsProto.newBuilder(); + dnd.setHostName("hostName"); + dnd.setIpAddress("1.2.3.5"); + dnd.setNetworkLocation("/default"); + dnd.setNetworkName("hostName"); + dnd.addPorts(HddsProtos.Port.newBuilder() + .setName("ratis").setValue(5678).build()); + dnd.setUuid(uuid); + + HddsProtos.Node.Builder builder = HddsProtos.Node.newBuilder(); + builder.addNodeOperationalStates( + HddsProtos.NodeOperationalState.DECOMMISSIONING); + builder.addNodeStates(HddsProtos.NodeState.HEALTHY); + builder.setNodeID(dnd.build()); + nodesList.add(builder.build()); + return nodesList; + } + + private List getNodeDetails(int n) { + List nodesList = new ArrayList<>(); + + for (int i = 0; i < n; i++) { + HddsProtos.DatanodeDetailsProto.Builder dnd = + HddsProtos.DatanodeDetailsProto.newBuilder(); + dnd.setHostName("host" + i); + dnd.setIpAddress("1.2.3." 
+ i + 1); + dnd.setNetworkLocation("/default"); + dnd.setNetworkName("host" + i); + dnd.addPorts(HddsProtos.Port.newBuilder() + .setName("ratis").setValue(5678).build()); + dnd.setUuid(UUID.randomUUID().toString()); + + HddsProtos.Node.Builder builder = HddsProtos.Node.newBuilder(); + builder.addNodeOperationalStates( + HddsProtos.NodeOperationalState.DECOMMISSIONING); + builder.addNodeStates(HddsProtos.NodeState.HEALTHY); + builder.setNodeID(dnd.build()); + nodesList.add(builder.build()); + } + return nodesList; + } + + private Map> getContainersOnDecomNodes() { + Map> containerMap = new HashMap<>(); + List underReplicated = new ArrayList<>(); + underReplicated.add(new ContainerID(1L)); + underReplicated.add(new ContainerID(2L)); + underReplicated.add(new ContainerID(3L)); + containerMap.put("UnderReplicated", underReplicated); + List unclosed = new ArrayList<>(); + unclosed.add(new ContainerID(10L)); + unclosed.add(new ContainerID(11L)); + unclosed.add(new ContainerID(12L)); + containerMap.put("UnClosed", unclosed); + return containerMap; + } + + private ArrayList getMetrics() { + ArrayList result = new ArrayList<>(); + // no nodes decommissioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 0, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 0, " + + "\"ContainersUnderReplicatedTotal\" : 0, \"ContainersUnClosedTotal\" : 0, " + + "\"ContainersSufficientlyReplicatedTotal\" : 0 } ]}"); + // 2 nodes in decommisioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 2, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 2, " + + "\"ContainersUnderReplicatedTotal\" : 6, \"ContainersUnclosedTotal\" : 6, " + + "\"ContainersSufficientlyReplicatedTotal\" : 10, " + + "\"tag.datanode.1\" : \"host0\", \"tag.Hostname.1\" : \"host0\", " + + "\"PipelinesWaitingToCloseDN.1\" : 1, \"UnderReplicatedDN.1\" : 3, " + + "\"SufficientlyReplicatedDN.1\" : 0, \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 111211, " + + "\"tag.datanode.2\" : \"host1\", \"tag.Hostname.2\" : \"host1\", " + + "\"PipelinesWaitingToCloseDN.2\" : 1, \"UnderReplicatedDN.2\" : 3, " + + "\"SufficientlyReplicatedDN.2\" : 0, \"UnclosedContainersDN.2\" : 3, \"StartTimeDN.2\" : 221221} ]}"); + // only host 1 decommissioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 1, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 1, " + + "\"ContainersUnderReplicatedTotal\" : 3, \"ContainersUnclosedTotal\" : 3, " + + "\"ContainersSufficientlyReplicatedTotal\" : 10, " + + "\"tag.datanode.1\" : \"host0\",\n \"tag.Hostname.1\" : \"host0\",\n " + + "\"PipelinesWaitingToCloseDN.1\" : 1,\n \"UnderReplicatedDN.1\" : 3,\n " + + "\"SufficientlyReplicatedDN.1\" : 0,\n \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 221221} ]}"); + return result; + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java new 
file mode 100644 index 000000000000..a244e4ff2ce2 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java @@ -0,0 +1,421 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; + +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO; +import org.junit.jupiter.api.BeforeEach; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import javax.ws.rs.core.Response; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.UUID; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.mockito.Mockito.when; + +/** + * Test NSSummary Disk Usage subpath ordering. 
+ */ +public class TestNSSummaryDiskUsageOrdering { + + @TempDir + private Path temporaryFolder; + + private ReconOMMetadataManager reconOMMetadataManager; + private NSSummaryEndpoint nsSummaryEndpoint; + private OzoneConfiguration ozoneConfiguration; + private static final String ROOT_PATH = "/"; + private static final String TEST_USER = "TestUser"; + private OMMetadataManager omMetadataManager; + @BeforeEach + public void setUp() throws Exception { + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, + 100); + omMetadataManager = initializeNewOmMetadataManager( + Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")) + .toFile()); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + Files.createDirectory(temporaryFolder.resolve("OmMetataDir")).toFile()); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .addBinding(OzoneStorageContainerManager.class, + getMockReconSCM()) + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) + .addBinding(NSSummaryEndpoint.class) + .build(); + ReconNamespaceSummaryManager reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); + + // populate OM DB and reprocess into Recon RocksDB + populateOMDB(); + NSSummaryTaskWithFSO nSSummaryTaskWithFso = + new NSSummaryTaskWithFSO(reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. + * @throws IOException ioEx + */ + private static OMMetadataManager initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + OzoneConfiguration omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + return omMetadataManager; + } + + @Test + public void testDiskUsageOrderingForRoot() throws Exception { + // root level DU + // Verify the ordering of subpaths under the root + verifyOrdering(ROOT_PATH); + } + + @Test + public void testDiskUsageOrderingForVolume() throws Exception { + // volume level DU + // Verify the ordering of subpaths under the volume + verifyOrdering("/volA"); + verifyOrdering("/volB"); + } + + @Test + public void testDiskUsageOrderingForBucket() throws Exception { + // bucket level DU + // Verify the ordering of subpaths under the bucket + verifyOrdering("/volA/bucketA1"); + verifyOrdering("/volA/bucketA2"); + verifyOrdering("/volA/bucketA3"); + verifyOrdering("/volB/bucketB1"); + } + + private void verifyOrdering(String path) + throws IOException { + Response response = + nsSummaryEndpoint.getDiskUsage(path, true, false, true); + DUResponse duRes = (DUResponse) response.getEntity(); + List duData = duRes.getDuData(); + List sortedDuData = new ArrayList<>(duData); + // Sort the DU data by size in descending order to compare with the original. 
+ sortedDuData.sort( + Comparator.comparingLong(DUResponse.DiskUsage::getSize).reversed()); + + for (int i = 0; i < duData.size(); i++) { + assertEquals(sortedDuData.get(i).getSubpath(), + duData.get(i).getSubpath(), + "DU-Sub-Path under " + path + + " should be sorted by descending order of size"); + } + } + + /** + * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. + * The test setup mimics the following filesystem structure with specified sizes: + * + * root + * ├── volA + * │ ├── bucketA1 + * │ │ ├── fileA1 (Size: 600KB) + * │ │ ├── fileA2 (Size: 80KB) + * │ │ ├── dirA1 (Total Size: 1500KB) + * │ │ ├── dirA2 (Total Size: 1700KB) + * │ │ └── dirA3 (Total Size: 1300KB) + * │ ├── bucketA2 + * │ │ ├── fileA3 (Size: 200KB) + * │ │ ├── fileA4 (Size: 4000KB) + * │ │ ├── dirA4 (Total Size: 1100KB) + * │ │ ├── dirA5 (Total Size: 1900KB) + * │ │ └── dirA6 (Total Size: 210KB) + * │ └── bucketA3 + * │ ├── fileA5 (Size: 5000KB) + * │ ├── fileA6 (Size: 700KB) + * │ ├── dirA7 (Total Size: 1200KB) + * │ ├── dirA8 (Total Size: 1600KB) + * │ └── dirA9 (Total Size: 180KB) + * └── volB + * └── bucketB1 + * ├── fileB1 (Size: 300KB) + * ├── fileB2 (Size: 500KB) + * ├── dirB1 (Total Size: 14000KB) + * ├── dirB2 (Total Size: 1800KB) + * └── dirB3 (Total Size: 2200KB) + * + * @throws Exception + */ + private void populateOMDB() throws Exception { + // Create Volumes + long volAObjectId = createVolume("volA"); + long volBObjectId = createVolume("volB"); + + // Create Buckets in volA + long bucketA1ObjectId = + createBucket("volA", "bucketA1", 600 + 80 + 1500 + 1700 + 1300); + long bucketA2ObjectId = + createBucket("volA", "bucketA2", 200 + 4000 + 1100 + 1900 + 210); + long bucketA3ObjectId = + createBucket("volA", "bucketA3", 5000 + 700 + 1200 + 1600 + 180); + + // Create Bucket in volB + long bucketB1ObjectId = + createBucket("volB", "bucketB1", 300 + 500 + 14000 + 1800 + 2200); + + // Create Directories and Files under bucketA1 + long dirA1ObjectId = + createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, + "dirA1"); + long dirA2ObjectId = + createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, + "dirA2"); + long dirA3ObjectId = + createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, + "dirA3"); + + // Files directly under bucketA1 + createFile("fileA1", "bucketA1", "volA", "fileA1", bucketA1ObjectId, + bucketA1ObjectId, volAObjectId, 600 * 1024); + createFile("fileA2", "bucketA1", "volA", "fileA2", bucketA1ObjectId, + bucketA1ObjectId, volAObjectId, 80 * 1024); + + // Create Directories and Files under bucketA2 + long dirA4ObjectId = + createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, + "dirA4"); + long dirA5ObjectId = + createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, + "dirA5"); + long dirA6ObjectId = + createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, + "dirA6"); + + // Files directly under bucketA2 + createFile("fileA3", "bucketA2", "volA", "fileA3", bucketA2ObjectId, + bucketA2ObjectId, volAObjectId, 200 * 1024); + createFile("fileA4", "bucketA2", "volA", "fileA4", bucketA2ObjectId, + bucketA2ObjectId, volAObjectId, 4000 * 1024); + + // Create Directories and Files under bucketA3 + long dirA7ObjectId = + createDirectory(bucketA3ObjectId, bucketA3ObjectId, volAObjectId, + "dirA7"); + long dirA8ObjectId = + createDirectory(bucketA3ObjectId, bucketA3ObjectId, volAObjectId, + "dirA8"); + long dirA9ObjectId = + createDirectory(bucketA3ObjectId, bucketA3ObjectId, volAObjectId, + "dirA9"); + + 
// Files directly under bucketA3 + createFile("fileA5", "bucketA3", "volA", "fileA5", bucketA3ObjectId, + bucketA3ObjectId, volAObjectId, 5000 * 1024); + createFile("fileA6", "bucketA3", "volA", "fileA6", bucketA3ObjectId, + bucketA3ObjectId, volAObjectId, 700 * 1024); + + // Create Directories and Files under bucketB1 + long dirB1ObjectId = + createDirectory(bucketB1ObjectId, bucketB1ObjectId, volBObjectId, + "dirB1"); + long dirB2ObjectId = + createDirectory(bucketB1ObjectId, bucketB1ObjectId, volBObjectId, + "dirB2"); + long dirB3ObjectId = + createDirectory(bucketB1ObjectId, bucketB1ObjectId, volBObjectId, + "dirB3"); + + // Files directly under bucketB1 + createFile("fileB1", "bucketB1", "volB", "fileB1", bucketB1ObjectId, + bucketB1ObjectId, volBObjectId, 300 * 1024); + createFile("fileB2", "bucketB1", "volB", "fileB2", bucketB1ObjectId, + bucketB1ObjectId, volBObjectId, 500 * 1024); + + // Create Inner files under directories + createFile("dirA1/innerFile", "bucketA1", "volA", "innerFile", + dirA1ObjectId, bucketA1ObjectId, volAObjectId, 1500 * 1024); + createFile("dirA2/innerFile", "bucketA1", "volA", "innerFile", + dirA2ObjectId, bucketA1ObjectId, volAObjectId, 1700 * 1024); + createFile("dirA3/innerFile", "bucketA1", "volA", "innerFile", + dirA3ObjectId, bucketA1ObjectId, volAObjectId, 1300 * 1024); + createFile("dirA4/innerFile", "bucketA2", "volA", "innerFile", + dirA4ObjectId, bucketA2ObjectId, volAObjectId, 1100 * 1024); + createFile("dirA5/innerFile", "bucketA2", "volA", "innerFile", + dirA5ObjectId, bucketA2ObjectId, volAObjectId, 1900 * 1024); + createFile("dirA6/innerFile", "bucketA2", "volA", "innerFile", + dirA6ObjectId, bucketA2ObjectId, volAObjectId, 210 * 1024); + createFile("dirA7/innerFile", "bucketA3", "volA", "innerFile", + dirA7ObjectId, bucketA3ObjectId, volAObjectId, 1200 * 1024); + createFile("dirA8/innerFile", "bucketA3", "volA", "innerFile", + dirA8ObjectId, bucketA3ObjectId, volAObjectId, 1600 * 1024); + createFile("dirA9/innerFile", "bucketA3", "volA", "innerFile", + dirA9ObjectId, bucketA3ObjectId, volAObjectId, 180 * 1024); + createFile("dirB1/innerFile", "bucketB1", "volB", "innerFile", + dirB1ObjectId, bucketB1ObjectId, volBObjectId, 14000 * 1024); + createFile("dirB2/innerFile", "bucketB1", "volB", "innerFile", + dirB2ObjectId, bucketB1ObjectId, volBObjectId, 1800 * 1024); + createFile("dirB3/innerFile", "bucketB1", "volB", "innerFile", + dirB3ObjectId, bucketB1ObjectId, volBObjectId, 2200 * 1024); + } + + /** + * Create a volume and add it to the Volume Table. + * @return volume Object ID + * @throws IOException + */ + private long createVolume(String volumeName) throws Exception { + String volumeKey = reconOMMetadataManager.getVolumeKey(volumeName); + long volumeId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Generate positive ID + OmVolumeArgs args = OmVolumeArgs.newBuilder() + .setObjectID(volumeId) + .setVolume(volumeName) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + + reconOMMetadataManager.getVolumeTable().put(volumeKey, args); + return volumeId; + } + + /** + * Create a bucket and add it to the Bucket Table. 
+ * @return bucket Object ID + * @throws IOException + */ + private long createBucket(String volumeName, String bucketName, long dataSize) + throws Exception { + String bucketKey = + reconOMMetadataManager.getBucketKey(volumeName, bucketName); + long bucketId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Generate positive ID + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setObjectID(bucketId) + .setBucketLayout(getBucketLayout()) + .setUsedBytes(dataSize) + .build(); + + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo); + return bucketId; + } + + /** + * Create a directory and add it to the Directory Table. + * @return directory Object ID + * @throws IOException + */ + private long createDirectory(long parentObjectId, + long bucketObjectId, + long volumeObjectId, + String dirName) throws IOException { + long objectId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Ensure positive ID + writeDirToOm(reconOMMetadataManager, objectId, parentObjectId, + bucketObjectId, + volumeObjectId, dirName); + return objectId; + } + + /** + * Create a file and add it to the File Table. + * @return file Object ID + * @throws IOException + */ + @SuppressWarnings("checkstyle:ParameterNumber") + private long createFile(String key, + String bucket, + String volume, + String fileName, + long parentObjectId, + long bucketObjectId, + long volumeObjectId, + long dataSize) throws IOException { + long objectId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Ensure positive ID + writeKeyToOm(reconOMMetadataManager, key, bucket, volume, fileName, + objectId, + parentObjectId, bucketObjectId, volumeObjectId, dataSize, + getBucketLayout()); + return objectId; + } + + private static ReconStorageContainerManagerFacade getMockReconSCM() + throws ContainerNotFoundException { + ReconStorageContainerManagerFacade reconSCM = + mock(ReconStorageContainerManagerFacade.class); + ContainerManager containerManager = mock(ContainerManager.class); + + when(reconSCM.getContainerManager()).thenReturn(containerManager); + ReconNodeManager mockReconNodeManager = mock(ReconNodeManager.class); + when(reconSCM.getScmNodeManager()).thenReturn(mockReconNodeManager); + return reconSCM; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index cbe850b918f0..54da926601e5 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -42,12 +43,14 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.ReconUtils; import 
org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.recon.api.types.DUResponse; -import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; -import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; +import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; import org.apache.hadoop.ozone.recon.common.CommonUtils; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; @@ -57,9 +60,12 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.mockito.ArgumentCaptor; +import org.slf4j.Logger; import javax.ws.rs.core.Response; @@ -74,8 +80,6 @@ import java.util.Set; import java.util.HashSet; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; @@ -83,8 +87,12 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.verify; /** * Test for NSSummary REST APIs with FSO. 
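The hunks below also add tests for the new ReconUtils.constructFullPath helper and for its behaviour while the NSSummary tree is being rebuilt. As orientation only, here is a minimal sketch of the parent-walk those tests appear to exercise; the NSSummary#getDirName accessor and the parentId == 0 "reached the bucket" sentinel are assumptions inferred from the test code, not the actual implementation.

// Hedged sketch, not the real ReconUtils code: walk NSSummary parent links from the key's
// parent directory up to the bucket, prepending directory names along the way.
private static String constructFullPathSketch(OmKeyInfo keyInfo,
    ReconNamespaceSummaryManager summaryManager) throws IOException {
  StringBuilder path = new StringBuilder(keyInfo.getKeyName());
  long parentId = keyInfo.getParentObjectID();
  while (parentId != 0) {
    NSSummary node = summaryManager.getNSSummary(parentId);
    if (node == null) {
      break;                           // no summary stored for this ancestor
    }
    if (node.getParentId() == -1) {
      // Rebuild in progress: the tests below expect "" plus a logged warning here.
      return "";
    }
    if (node.getParentId() == 0) {
      break;                           // assumed to be the bucket-level summary
    }
    path.insert(0, node.getDirName() + "/");
    parentId = node.getParentId();
  }
  return keyInfo.getVolumeName() + "/" + keyInfo.getBucketName() + "/" + path;
}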
@@ -114,6 +122,7 @@ public class TestNSSummaryEndpointWithFSO { private Path temporaryFolder; private ReconOMMetadataManager reconOMMetadataManager; + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; private NSSummaryEndpoint nsSummaryEndpoint; private OzoneConfiguration ozoneConfiguration; private CommonUtils commonUtils; @@ -375,7 +384,7 @@ public void setUp() throws Exception { mock(StorageContainerServiceProviderImpl.class)) .addBinding(NSSummaryEndpoint.class) .build(); - ReconNamespaceSummaryManager reconNamespaceSummaryManager = + this.reconNamespaceSummaryManager = reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); @@ -449,7 +458,7 @@ public void testGetBasicInfoKey() throws Exception { public void testDiskUsageRoot() throws Exception { // root level DU Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, - false, false); + false, false, false); DUResponse duRootRes = (DUResponse) rootResponse.getEntity(); assertEquals(2, duRootRes.getCount()); List duRootData = duRootRes.getDuData(); @@ -463,11 +472,12 @@ public void testDiskUsageRoot() throws Exception { assertEquals(VOL_DATA_SIZE, duVol1.getSize()); assertEquals(VOL_TWO_DATA_SIZE, duVol2.getSize()); } + @Test public void testDiskUsageVolume() throws Exception { // volume level DU Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, - false, false); + false, false, false); DUResponse duVolRes = (DUResponse) volResponse.getEntity(); assertEquals(2, duVolRes.getCount()); List duData = duVolRes.getDuData(); @@ -482,11 +492,12 @@ public void testDiskUsageVolume() throws Exception { assertEquals(BUCKET_TWO_DATA_SIZE, duBucket2.getSize()); } + @Test public void testDiskUsageBucket() throws Exception { // bucket level DU Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, - false, false); + false, false, false); DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); assertEquals(1, duBucketResponse.getCount()); DUResponse.DiskUsage duDir1 = duBucketResponse.getDuData().get(0); @@ -494,11 +505,12 @@ public void testDiskUsageBucket() throws Exception { assertEquals(DIR_ONE_DATA_SIZE, duDir1.getSize()); } + @Test public void testDiskUsageDir() throws Exception { // dir level DU Response dirResponse = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH, - false, false); + false, false, false); DUResponse duDirReponse = (DUResponse) dirResponse.getEntity(); assertEquals(3, duDirReponse.getCount()); List duSubDir = duDirReponse.getDuData(); @@ -517,21 +529,23 @@ public void testDiskUsageDir() throws Exception { assertEquals(KEY_SIX_SIZE, duDir4.getSize()); } + @Test public void testDiskUsageKey() throws Exception { // key level DU Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, - false, false); + false, false, false); DUResponse keyObj = (DUResponse) keyResponse.getEntity(); assertEquals(0, keyObj.getCount()); assertEquals(KEY_FOUR_SIZE, keyObj.getSize()); } + @Test public void testDiskUsageUnknown() throws Exception { // invalid path check Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH, - false, false); + false, false, false); DUResponse invalidObj = (DUResponse) invalidResponse.getEntity(); assertEquals(ResponseStatus.PATH_NOT_FOUND, invalidObj.getStatus()); @@ -541,7 +555,7 @@ public void testDiskUsageUnknown() throws Exception { public void testDiskUsageWithReplication() throws Exception { setUpMultiBlockKey(); Response keyResponse = 
nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA, @@ -553,7 +567,7 @@ public void testDataSizeUnderRootWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); // withReplica is true Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT, @@ -567,7 +581,7 @@ public void testDataSizeUnderRootWithReplication() throws IOException { public void testDataSizeUnderVolWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, @@ -580,7 +594,7 @@ public void testDataSizeUnderVolWithReplication() throws IOException { public void testDataSizeUnderBucketWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, @@ -599,7 +613,7 @@ public void testDataSizeUnderBucketWithReplication() throws IOException { public void testDataSizeUnderDirWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response dir1Response = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) dir1Response.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_DIR1, @@ -612,7 +626,7 @@ public void testDataSizeUnderDirWithReplication() throws IOException { public void testDataSizeUnderKeyWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY, @@ -691,6 +705,151 @@ public void checkFileSizeDist(String path, int bin0, } } + @Test + public void testConstructFullPath() throws IOException { + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName("file2") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_TWO_OBJECT_ID) + .setParentObjectID(DIR_TWO_OBJECT_ID) + .build(); + // Call constructFullPath and verify the result + String fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + String expectedPath = "vol/bucket1/dir1/dir2/file2"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 3 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file3") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_THREE_OBJECT_ID) + 
.setParentObjectID(DIR_THREE_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol/bucket1/dir1/dir3/file3"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 6 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file6") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_SIX_OBJECT_ID) + .setParentObjectID(DIR_FOUR_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol/bucket1/dir1/dir4/file6"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 1 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file1") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_ONE_OBJECT_ID) + .setParentObjectID(BUCKET_ONE_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol/bucket1/file1"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 9 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file9") + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_THREE) + .setObjectID(KEY_NINE_OBJECT_ID) + .setParentObjectID(DIR_FIVE_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol2/bucket3/dir5/file9"; + Assertions.assertEquals(expectedPath, fullPath); + + // Check for when we encounter a NSSUmamry with parentId -1 + // Fetch NSSummary for dir1 and immediately update its parentId. + NSSummary dir1Summary = reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID); + dir1Summary.setParentId(-1); // Update parentId to -1 + + reconNamespaceSummaryManager.deleteNSSummary(DIR_ONE_OBJECT_ID); + reconNamespaceSummaryManager.storeNSSummary(DIR_ONE_OBJECT_ID, dir1Summary); + + NSSummary changedDir1Summary = reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID); + Assertions.assertEquals(-1, changedDir1Summary.getParentId(), "The parentId should be updated to -1"); + + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file2") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_TWO_OBJECT_ID) + .setParentObjectID(DIR_TWO_OBJECT_ID) + .build(); + // Call constructFullPath and verify the result + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + } + + @Test + public void testConstructFullPathWithNegativeParentIdTriggersRebuild() throws IOException { + // Setup + long dirOneObjectId = 1L; // Sample object ID for the directory + ReconNamespaceSummaryManager mockSummaryManager = mock(ReconNamespaceSummaryManager.class); + ReconOMMetadataManager mockMetadataManager = mock(ReconOMMetadataManager.class); + NSSummary dir1Summary = new NSSummary(); + dir1Summary.setParentId(-1); // Simulate directory at the top of the tree + when(mockSummaryManager.getNSSummary(dirOneObjectId)).thenReturn(dir1Summary); + + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName("file2") + .setVolumeName("vol") + .setBucketName("bucket1") + .setObjectID(2L) + .setParentObjectID(dirOneObjectId) + .build(); + + String result = ReconUtils.constructFullPath(keyInfo, mockSummaryManager, mockMetadataManager); + assertEquals("", result, "Expected an empty string return due to rebuild trigger"); + } + + @Test + public void testLoggingWhenParentIdIsNegative() 
throws IOException { + ReconNamespaceSummaryManager mockManager = + mock(ReconNamespaceSummaryManager.class); + Logger mockLogger = mock(Logger.class); + ReconUtils.setLogger(mockLogger); + + NSSummary mockSummary = new NSSummary(); + mockSummary.setParentId(-1); + when(mockManager.getNSSummary(anyLong())).thenReturn(mockSummary); + + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName("testKey") + .setVolumeName("vol") + .setBucketName("bucket") + .setObjectID(1L) + .setParentObjectID(1L) + .build(); + + ReconUtils.constructFullPath(keyInfo, mockManager, null); + + // Assert + ArgumentCaptor logCaptor = ArgumentCaptor.forClass(String.class); + verify(mockLogger).warn(logCaptor.capture()); + String loggedMessage = logCaptor.getValue(); + + // Here we can assert the exact message we expect to see in the logs. + assertEquals( + "NSSummary tree is currently being rebuilt, returning empty string " + + "for path construction.", loggedMessage); + } + + /** * Write directories and keys info into OM DB. * @throws Exception @@ -1247,7 +1406,7 @@ private static BucketLayout getBucketLayout() { } private static SCMNodeStat getMockSCMRootStat() { - return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, + return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index ba00f843f447..dba245ce8b80 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -43,6 +44,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -58,6 +60,7 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -74,15 +77,17 @@ import java.util.Set; import java.util.HashSet; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.setConfiguration; + import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -113,6 +118,7 @@ public class TestNSSummaryEndpointWithLegacy { @TempDir private Path temporaryFolder; + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; private ReconOMMetadataManager reconOMMetadataManager; private NSSummaryEndpoint nsSummaryEndpoint; private OzoneConfiguration conf; @@ -243,10 +249,10 @@ public class TestNSSummaryEndpointWithLegacy { StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE6_SIZE_WITH_REPLICA = getReplicatedSize(KEY_SIX_SIZE, - StandaloneReplicationConfig.getInstance(ONE));; + StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE7_SIZE_WITH_REPLICA = getReplicatedSize(KEY_SEVEN_SIZE, - StandaloneReplicationConfig.getInstance(ONE));; + StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE8_SIZE_WITH_REPLICA = getReplicatedSize(KEY_EIGHT_SIZE, StandaloneReplicationConfig.getInstance(ONE)); @@ -376,7 +382,7 @@ public void setUp() throws Exception { mock(StorageContainerServiceProviderImpl.class)) .addBinding(NSSummaryEndpoint.class) .build(); - ReconNamespaceSummaryManager reconNamespaceSummaryManager = + this.reconNamespaceSummaryManager = reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); @@ -449,7 +455,7 @@ public void testGetBasicInfoKey() throws Exception { public void testDiskUsageRoot() throws Exception { // root level DU Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, - false, false); + false, false, false); DUResponse duRootRes = (DUResponse) rootResponse.getEntity(); assertEquals(2, duRootRes.getCount()); List duRootData = duRootRes.getDuData(); @@ -468,7 +474,7 @@ public void testDiskUsageRoot() throws Exception { public void testDiskUsageVolume() throws Exception { // volume level DU Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, - false, false); + false, false, false); DUResponse duVolRes = (DUResponse) volResponse.getEntity(); assertEquals(2, duVolRes.getCount()); List duData = duVolRes.getDuData(); @@ -487,7 +493,7 @@ public void testDiskUsageVolume() throws Exception { public void testDiskUsageBucket() throws Exception { // bucket level DU Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, - false, false); + false, false, false); DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); assertEquals(1, duBucketResponse.getCount()); DUResponse.DiskUsage duDir1 = duBucketResponse.getDuData().get(0); @@ -499,7 +505,7 @@ public void testDiskUsageBucket() throws Exception { public void testDiskUsageDir() throws Exception { 
// dir level DU Response dirResponse = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH, - false, false); + false, false, false); DUResponse duDirReponse = (DUResponse) dirResponse.getEntity(); assertEquals(3, duDirReponse.getCount()); List duSubDir = duDirReponse.getDuData(); @@ -522,7 +528,7 @@ public void testDiskUsageDir() throws Exception { public void testDiskUsageKey() throws Exception { // key level DU Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, - false, false); + false, false, false); DUResponse keyObj = (DUResponse) keyResponse.getEntity(); assertEquals(0, keyObj.getCount()); assertEquals(KEY_FOUR_SIZE, keyObj.getSize()); @@ -532,7 +538,7 @@ public void testDiskUsageKey() throws Exception { public void testDiskUsageUnknown() throws Exception { // invalid path check Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH, - false, false); + false, false, false); DUResponse invalidObj = (DUResponse) invalidResponse.getEntity(); assertEquals(ResponseStatus.PATH_NOT_FOUND, invalidObj.getStatus()); @@ -542,7 +548,7 @@ public void testDiskUsageUnknown() throws Exception { public void testDiskUsageWithReplication() throws Exception { setUpMultiBlockKey(); Response keyResponse = nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA, @@ -554,7 +560,7 @@ public void testDataSizeUnderRootWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); // withReplica is true Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT, @@ -568,7 +574,7 @@ public void testDataSizeUnderRootWithReplication() throws IOException { public void testDataSizeUnderVolWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, @@ -581,7 +587,7 @@ public void testDataSizeUnderVolWithReplication() throws IOException { public void testDataSizeUnderBucketWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, @@ -600,7 +606,7 @@ public void testDataSizeUnderBucketWithReplication() throws IOException { public void testDataSizeUnderDirWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response dir1Response = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) dir1Response.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_DIR1, @@ -613,7 +619,7 @@ public void testDataSizeUnderDirWithReplication() throws IOException { public 
void testDataSizeUnderKeyWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, - false, true); + false, true, false); DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY, @@ -692,6 +698,48 @@ public void checkFileSizeDist(String path, int bin0, } } + @Test + public void testConstructFullPath() throws IOException { + // For Key Tables the parent object ID is not set hence it + // will by default be set as -1 when the NSSummary object is created + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName("dir1/dir2/file2") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_TWO_OBJECT_ID) + .build(); + // Call constructFullPath and verify the result + String fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + String expectedPath = "vol/bucket1/dir1/dir2/file2"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 3 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("dir1/dir2/") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(DIR_TWO_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol/bucket1/dir1/dir2/"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 6 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("dir1/dir4/file6") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_SIX_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol/bucket1/dir1/dir4/file6"; + Assertions.assertEquals(expectedPath, fullPath); + } + + /** * Write directories and keys info into OM DB. * @throws Exception @@ -875,6 +923,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( omDbDir.getAbsolutePath()); omConfiguration.set(OMConfigKeys .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + setConfiguration(omConfiguration); OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( omConfiguration, null); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java new file mode 100644 index 000000000000..6a2f2c557db8 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -0,0 +1,1472 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.ReconUtils; +import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; +import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; +import org.apache.hadoop.ozone.recon.api.types.BucketObjectDBInfo; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; +import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; +import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.VolumeObjectDBInfo; +import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; +import org.apache.hadoop.ozone.recon.common.CommonUtils; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import javax.ws.rs.core.Response; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; 
+import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests the NSSummary REST APIs within the context of an Object Store (OBS) layout, + * as well as Legacy layout buckets with FileSystemPaths disabled. The tests aim to + * validate API responses for buckets that follow the flat hierarchy model typical + * of OBS layouts. + *
+ * The test environment simulates a simple object storage structure with volumes + * containing buckets, which in turn contain files. Specifically, it includes: + * - Two OBS layout buckets (bucket1 and bucket2) under 'vol', each containing + * multiple files. + * - Two Legacy layout buckets (bucket3 and bucket4) under 'vol2', with the + * fileSystemEnabled flag set to false for these legacy buckets. + *
    + * The directory structure for testing is as follows: + * . + * └── vol + * ├── bucket1 (OBS) + * │ ├── KEY_ONE + * │ ├── KEY_TWO + * │ └── KEY_THREE + * └── bucket2 (OBS) + * ├── KEY_FOUR + * └── KEY_FIVE + * └── vol2 + * ├── bucket3 (Legacy) + * │ ├── KEY_EIGHT + * │ ├── KEY_NINE + * │ └── KEY_TEN + * └── bucket4 (Legacy) + * └── KEY_ELEVEN + */ +public class TestNSSummaryEndpointWithOBSAndLegacy { + @TempDir + private Path temporaryFolder; + + private ReconOMMetadataManager reconOMMetadataManager; + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private NSSummaryEndpoint nsSummaryEndpoint; + private OzoneConfiguration conf; + private CommonUtils commonUtils; + + private static final String TEST_PATH_UTILITY = + "/vol1/buck1/a/b/c/d/e/file1.txt"; + private static final String PARENT_DIR = "vol1/buck1/a/b/c/d/e"; + private static final String[] TEST_NAMES = + new String[]{"vol1", "buck1", "a", "b", "c", "d", "e", "file1.txt"}; + private static final String TEST_KEY_NAMES = "a/b/c/d/e/file1.txt"; + + // Object names + private static final String VOL = "vol"; + private static final String VOL_TWO = "vol2"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String BUCKET_THREE = "bucket3"; + private static final String BUCKET_FOUR = "bucket4"; + private static final String KEY_ONE = "file1"; + private static final String KEY_TWO = "////file2"; + private static final String KEY_THREE = "file3///"; + private static final String KEY_FOUR = "file4"; + private static final String KEY_FIVE = "_//////"; + private static final String KEY_EIGHT = "file8"; + private static final String KEY_NINE = "//////"; + private static final String KEY_TEN = "///__file10"; + private static final String KEY_ELEVEN = "////file11"; + private static final String MULTI_BLOCK_FILE = KEY_THREE; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long VOL_TWO_OBJECT_ID = 14L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long BUCKET_THREE_OBJECT_ID = 15L; + private static final long BUCKET_FOUR_OBJECT_ID = 16L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_EIGHT_OBJECT_ID = 17L; + private static final long KEY_NINE_OBJECT_ID = 19L; + private static final long KEY_TEN_OBJECT_ID = 20L; + private static final long KEY_ELEVEN_OBJECT_ID = 21L; + private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L; + + // container IDs + private static final long CONTAINER_ONE_ID = 1L; + private static final long CONTAINER_TWO_ID = 2L; + private static final long CONTAINER_THREE_ID = 3L; + private static final long CONTAINER_FOUR_ID = 4L; + private static final long CONTAINER_FIVE_ID = 5L; + private static final long CONTAINER_SIX_ID = 6L; + + // replication factors + private static final int CONTAINER_ONE_REPLICA_COUNT = 3; + private static final int CONTAINER_TWO_REPLICA_COUNT = 2; + private static final int CONTAINER_THREE_REPLICA_COUNT = 4; + private static final int CONTAINER_FOUR_REPLICA_COUNT = 5; + private static final int CONTAINER_FIVE_REPLICA_COUNT = 2; + private static final int CONTAINER_SIX_REPLICA_COUNT = 3; + + 
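// The *_WITH_REPLICA constants further below are computed with QuotaUtil.getReplicatedSize; with a + // STANDALONE/ONE replication config the replication factor should be 1, so for example + // FILE_ONE_SIZE (500 bytes) is expected to stay 500 bytes once replication is accounted for. + 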
// block lengths + private static final long BLOCK_ONE_LENGTH = 1000L; + private static final long BLOCK_TWO_LENGTH = 2000L; + private static final long BLOCK_THREE_LENGTH = 3000L; + private static final long BLOCK_FOUR_LENGTH = 4000L; + private static final long BLOCK_FIVE_LENGTH = 5000L; + private static final long BLOCK_SIX_LENGTH = 6000L; + + // data size in bytes + private static final long FILE_ONE_SIZE = 500L; // bin 0 + private static final long FILE_TWO_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_THREE_SIZE = 4 * OzoneConsts.KB + 1; // bin 3 + private static final long FILE_FOUR_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_FIVE_SIZE = 100L; // bin 0 + private static final long FILE_EIGHT_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 + + private static final long FILE1_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_ONE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE2_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_TWO_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE3_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_THREE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE4_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_FOUR_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE5_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_FIVE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + + private static final long FILE8_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_EIGHT_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE9_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_NINE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE10_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_TEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE11_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_ELEVEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + + private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA + = FILE3_SIZE_WITH_REPLICA; + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA + + FILE4_SIZE_WITH_REPLICA + + FILE5_SIZE_WITH_REPLICA + + FILE8_SIZE_WITH_REPLICA + + FILE9_SIZE_WITH_REPLICA + + FILE10_SIZE_WITH_REPLICA + + FILE11_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA + + FILE4_SIZE_WITH_REPLICA + + FILE5_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1 + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3 + = FILE8_SIZE_WITH_REPLICA + + FILE9_SIZE_WITH_REPLICA + + FILE10_SIZE_WITH_REPLICA; + + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY + = FILE4_SIZE_WITH_REPLICA; + + // quota in bytes + private static final long ROOT_QUOTA = 2 * (2 * OzoneConsts.MB); + private static final long VOL_QUOTA = 2 * OzoneConsts.MB; + private static final long 
VOL_TWO_QUOTA = 2 * OzoneConsts.MB; + private static final long BUCKET_ONE_QUOTA = OzoneConsts.MB; + private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB; + private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; + private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; + + // mock client's path requests + private static final String TEST_USER = "TestUser"; + private static final String ROOT_PATH = "/"; + private static final String VOL_PATH = ROOT_PATH + VOL; + private static final String VOL_TWO_PATH = ROOT_PATH + VOL_TWO; + private static final String BUCKET_ONE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE; + private static final String BUCKET_TWO_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO; + private static final String BUCKET_THREE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE; + private static final String BUCKET_FOUR_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR; + private static final String KEY_ONE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE; + private static final String KEY_TWO_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_TWO; + private static final String KEY_THREE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; + private static final String KEY_FOUR_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; + private static final String KEY_FIVE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FIVE; + private static final String KEY_EIGHT_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_EIGHT; + private static final String KEY_NINE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_NINE; + private static final String KEY_TEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_TEN; + private static final String KEY_ELEVEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR + ROOT_PATH + KEY_ELEVEN; + private static final String KEY4_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; + private static final String MULTI_BLOCK_KEY_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; + private static final String INVALID_PATH = "/vol/path/not/found"; + + // some expected answers + private static final long ROOT_DATA_SIZE = + FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + + FILE_FIVE_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + + FILE_ELEVEN_SIZE; + private static final long VOL_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE + + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE; + + private static final long VOL_TWO_DATA_SIZE = + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE; + + private static final long BUCKET_ONE_DATA_SIZE = FILE_ONE_SIZE + + FILE_TWO_SIZE + + FILE_THREE_SIZE; + + private static final long BUCKET_TWO_DATA_SIZE = + FILE_FOUR_SIZE + FILE_FIVE_SIZE; + + private static final long BUCKET_THREE_DATA_SIZE = + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE; + + private static final long BUCKET_FOUR_DATA_SIZE = FILE_ELEVEN_SIZE; + + + @BeforeEach + public void setUp() throws Exception { + conf = new OzoneConfiguration(); + // By setting this config our Legacy buckets will behave like OBS buckets. 
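+ // With filesystem paths disabled, keys in these Legacy buckets are treated as a flat key + // space (no implicit directories), so NSSummaryTaskWithLegacy is expected to aggregate them + // the same way NSSummaryTaskWithOBS aggregates the OBS buckets.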
+ conf.set(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false"); + OMMetadataManager omMetadataManager = initializeNewOmMetadataManager( + Files.createDirectory(temporaryFolder.resolve( + "JunitOmDBDir")).toFile(), conf); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProvider(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + Files.createDirectory(temporaryFolder.resolve( + "omMetadatDir")).toFile()); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .addBinding(OzoneStorageContainerManager.class, + getMockReconSCM()) + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) + .addBinding(NSSummaryEndpoint.class) + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); + + // populate OM DB and reprocess into Recon RocksDB + populateOMDB(); + NSSummaryTaskWithOBS nsSummaryTaskWithOBS = + new NSSummaryTaskWithOBS(reconNamespaceSummaryManager, + reconOMMetadataManager, conf); + nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy = + new NSSummaryTaskWithLegacy(reconNamespaceSummaryManager, + reconOMMetadataManager, conf); + nsSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + commonUtils = new CommonUtils(); + } + + @Test + public void testUtility() { + String[] names = EntityHandler.parseRequestPath(TEST_PATH_UTILITY); + assertArrayEquals(TEST_NAMES, names); + String keyName = BucketHandler.getKeyName(names); + assertEquals(TEST_KEY_NAMES, keyName); + String subpath = BucketHandler.buildSubpath(PARENT_DIR, "file1.txt"); + assertEquals(TEST_PATH_UTILITY, subpath); + } + + @Test + public void testGetBasicInfoRoot() throws Exception { + // Test root basics + Response rootResponse = nsSummaryEndpoint.getBasicInfo(ROOT_PATH); + NamespaceSummaryResponse rootResponseObj = + (NamespaceSummaryResponse) rootResponse.getEntity(); + assertEquals(EntityType.ROOT, rootResponseObj.getEntityType()); + assertEquals(2, rootResponseObj.getCountStats().getNumVolume()); + assertEquals(4, rootResponseObj.getCountStats().getNumBucket()); + assertEquals(9, rootResponseObj.getCountStats().getNumTotalKey()); + } + + @Test + public void testGetBasicInfoVol() throws Exception { + // Test volume basics + Response volResponse = nsSummaryEndpoint.getBasicInfo(VOL_PATH); + NamespaceSummaryResponse volResponseObj = + (NamespaceSummaryResponse) volResponse.getEntity(); + assertEquals(EntityType.VOLUME, + volResponseObj.getEntityType()); + assertEquals(2, volResponseObj.getCountStats().getNumBucket()); + assertEquals(5, volResponseObj.getCountStats().getNumTotalKey()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. + getObjectDBInfo()).getAdmin()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. 
+ getObjectDBInfo()).getOwner()); + assertEquals(VOL, volResponseObj.getObjectDBInfo().getName()); + assertEquals(2097152, volResponseObj.getObjectDBInfo().getQuotaInBytes()); + assertEquals(-1, volResponseObj.getObjectDBInfo().getQuotaInNamespace()); + } + + @Test + public void testGetBasicInfoVolTwo() throws Exception { + // Test volume 2's basics + Response volTwoResponse = nsSummaryEndpoint.getBasicInfo(VOL_TWO_PATH); + NamespaceSummaryResponse volTwoResponseObj = + (NamespaceSummaryResponse) volTwoResponse.getEntity(); + assertEquals(EntityType.VOLUME, + volTwoResponseObj.getEntityType()); + assertEquals(2, volTwoResponseObj.getCountStats().getNumBucket()); + assertEquals(4, volTwoResponseObj.getCountStats().getNumTotalKey()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. + getObjectDBInfo()).getAdmin()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. + getObjectDBInfo()).getOwner()); + assertEquals(VOL_TWO, volTwoResponseObj.getObjectDBInfo().getName()); + assertEquals(2097152, + volTwoResponseObj.getObjectDBInfo().getQuotaInBytes()); + assertEquals(-1, volTwoResponseObj.getObjectDBInfo().getQuotaInNamespace()); + } + + @Test + public void testGetBasicInfoBucketOne() throws Exception { + // Test bucket 1's basics + Response bucketOneResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_ONE_PATH); + NamespaceSummaryResponse bucketOneObj = + (NamespaceSummaryResponse) bucketOneResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketOneObj.getEntityType()); + assertEquals(3, bucketOneObj.getCountStats().getNumTotalKey()); + assertEquals(VOL, + ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketOneObj.getObjectDBInfo()).getStorageType()); + assertEquals(getOBSBucketLayout(), + ((BucketObjectDBInfo) + bucketOneObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_ONE, + ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketTwo() throws Exception { + // Test bucket 2's basics + Response bucketTwoResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_TWO_PATH); + NamespaceSummaryResponse bucketTwoObj = + (NamespaceSummaryResponse) bucketTwoResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketTwoObj.getEntityType()); + assertEquals(2, bucketTwoObj.getCountStats().getNumTotalKey()); + assertEquals(VOL, + ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketTwoObj.getObjectDBInfo()).getStorageType()); + assertEquals(getOBSBucketLayout(), + ((BucketObjectDBInfo) + bucketTwoObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_TWO, + ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketThree() throws Exception { + // Test bucket 3's basics + Response bucketThreeResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_THREE_PATH); + NamespaceSummaryResponse bucketThreeObj = (NamespaceSummaryResponse) + bucketThreeResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketThreeObj.getEntityType()); + assertEquals(3, bucketThreeObj.getCountStats().getNumTotalKey()); + assertEquals(VOL_TWO, + ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketThreeObj.getObjectDBInfo()).getStorageType()); + assertEquals(getLegacyBucketLayout(), + ((BucketObjectDBInfo) + 
bucketThreeObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_THREE, + ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketFour() throws Exception { + // Test bucket 4's basics + Response bucketFourResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_FOUR_PATH); + NamespaceSummaryResponse bucketFourObj = + (NamespaceSummaryResponse) bucketFourResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketFourObj.getEntityType()); + assertEquals(1, bucketFourObj.getCountStats().getNumTotalKey()); + assertEquals(VOL_TWO, + ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketFourObj.getObjectDBInfo()).getStorageType()); + assertEquals(getLegacyBucketLayout(), + ((BucketObjectDBInfo) + bucketFourObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_FOUR, + ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoNoPath() throws Exception { + // Test invalid path + commonUtils.testNSSummaryBasicInfoNoPath(nsSummaryEndpoint); + } + + @Test + public void testGetBasicInfoKey() throws Exception { + // Test key + commonUtils.testNSSummaryBasicInfoKey(nsSummaryEndpoint); + } + + @Test + public void testDiskUsageRoot() throws Exception { + // root level DU + Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, + false, false, false); + DUResponse duRootRes = (DUResponse) rootResponse.getEntity(); + assertEquals(2, duRootRes.getCount()); + List duRootData = duRootRes.getDuData(); + // sort based on subpath + Collections.sort(duRootData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duVol1 = duRootData.get(0); + DUResponse.DiskUsage duVol2 = duRootData.get(1); + assertEquals(VOL_PATH, duVol1.getSubpath()); + assertEquals(VOL_TWO_PATH, duVol2.getSubpath()); + assertEquals(VOL_DATA_SIZE, duVol1.getSize()); + assertEquals(VOL_TWO_DATA_SIZE, duVol2.getSize()); + } + + @Test + public void testDiskUsageVolume() throws Exception { + // volume level DU + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, + false, false, false); + DUResponse duVolRes = (DUResponse) volResponse.getEntity(); + assertEquals(2, duVolRes.getCount()); + List duData = duVolRes.getDuData(); + // sort based on subpath + Collections.sort(duData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duBucket1 = duData.get(0); + DUResponse.DiskUsage duBucket2 = duData.get(1); + assertEquals(BUCKET_ONE_PATH, duBucket1.getSubpath()); + assertEquals(BUCKET_TWO_PATH, duBucket2.getSubpath()); + assertEquals(BUCKET_ONE_DATA_SIZE, duBucket1.getSize()); + assertEquals(BUCKET_TWO_DATA_SIZE, duBucket2.getSize()); + } + + @Test + public void testDiskUsageVolTwo() throws Exception { + // volume level DU + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_TWO_PATH, + false, false, false); + DUResponse duVolRes = (DUResponse) volResponse.getEntity(); + assertEquals(2, duVolRes.getCount()); + List duData = duVolRes.getDuData(); + // sort based on subpath + Collections.sort(duData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duBucket3 = duData.get(0); + DUResponse.DiskUsage duBucket4 = duData.get(1); + assertEquals(BUCKET_THREE_PATH, duBucket3.getSubpath()); + assertEquals(BUCKET_FOUR_PATH, duBucket4.getSubpath()); + assertEquals(VOL_TWO_DATA_SIZE, duVolRes.getSize()); + } + + @Test + public void 
testDiskUsageBucketOne() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, + false, false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_ONE_PATH, true, false, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(3, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_ONE_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageBucketTwo() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_TWO_PATH, + false, false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_TWO_PATH, true, false, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(2, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_TWO_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageBucketThree() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH, + false, false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this Legacy bucket. + assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_THREE_PATH, true, false, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(3, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_THREE_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageKey1() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ONE_PATH, + false, false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_ONE_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey2() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_TWO_PATH, + false, false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_TWO_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey4() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH, + true, false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_FOUR_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey5() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_FIVE_PATH, + false, false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_FIVE_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey8() throws Exception { + // key level DU + 
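+    // A key-level DU request reports no sub-paths (count 0) and a size equal
+    // to the key's raw data size; replica-adjusted sizes are covered
+    // separately by the *WithReplication tests.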
Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_EIGHT_PATH, + false, false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_EIGHT_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey11() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ELEVEN_PATH, + false, false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_ELEVEN_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageUnknown() throws Exception { + // invalid path check + Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH, + false, false, false); + DUResponse invalidObj = (DUResponse) invalidResponse.getEntity(); + assertEquals(ResponseStatus.PATH_NOT_FOUND, + invalidObj.getStatus()); + } + + @Test + public void testDiskUsageWithReplication() throws Exception { + setUpMultiBlockKey(); + Response keyResponse = nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH, + false, true, false); + DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderRootWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + // withReplica is true + Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, + false, true, false); + DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT, + replicaDUResponse.getSizeWithReplica()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, + replicaDUResponse.getDuData().get(0).getSizeWithReplica()); + + } + + @Test + public void testDataSizeUnderVolWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, + false, true, false); + DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, + replicaDUResponse.getSizeWithReplica()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, + replicaDUResponse.getDuData().get(0).getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderBucketOneWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, + false, true, false); + DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderBucketThreeWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH, + false, true, false); + DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public 
void testDataSizeUnderKeyWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH, + false, true, false); + DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testQuotaUsage() throws Exception { + // root level quota usage + Response rootResponse = nsSummaryEndpoint.getQuotaUsage(ROOT_PATH); + QuotaUsageResponse quRootRes = + (QuotaUsageResponse) rootResponse.getEntity(); + assertEquals(ROOT_QUOTA, quRootRes.getQuota()); + assertEquals(ROOT_DATA_SIZE, quRootRes.getQuotaUsed()); + + // volume level quota usage + Response volResponse = nsSummaryEndpoint.getQuotaUsage(VOL_PATH); + QuotaUsageResponse quVolRes = (QuotaUsageResponse) volResponse.getEntity(); + assertEquals(VOL_QUOTA, quVolRes.getQuota()); + assertEquals(VOL_DATA_SIZE, quVolRes.getQuotaUsed()); + + // bucket level quota usage + Response bucketRes = nsSummaryEndpoint.getQuotaUsage(BUCKET_ONE_PATH); + QuotaUsageResponse quBucketRes = (QuotaUsageResponse) bucketRes.getEntity(); + assertEquals(BUCKET_ONE_QUOTA, quBucketRes.getQuota()); + assertEquals(BUCKET_ONE_DATA_SIZE, quBucketRes.getQuotaUsed()); + + Response bucketRes2 = nsSummaryEndpoint.getQuotaUsage(BUCKET_TWO_PATH); + QuotaUsageResponse quBucketRes2 = + (QuotaUsageResponse) bucketRes2.getEntity(); + assertEquals(BUCKET_TWO_QUOTA, quBucketRes2.getQuota()); + assertEquals(BUCKET_TWO_DATA_SIZE, quBucketRes2.getQuotaUsed()); + + Response bucketRes3 = nsSummaryEndpoint.getQuotaUsage(BUCKET_THREE_PATH); + QuotaUsageResponse quBucketRes3 = + (QuotaUsageResponse) bucketRes3.getEntity(); + assertEquals(BUCKET_THREE_QUOTA, quBucketRes3.getQuota()); + assertEquals(BUCKET_THREE_DATA_SIZE, quBucketRes3.getQuotaUsed()); + + Response bucketRes4 = nsSummaryEndpoint.getQuotaUsage(BUCKET_FOUR_PATH); + QuotaUsageResponse quBucketRes4 = + (QuotaUsageResponse) bucketRes4.getEntity(); + assertEquals(BUCKET_FOUR_QUOTA, quBucketRes4.getQuota()); + assertEquals(BUCKET_FOUR_DATA_SIZE, quBucketRes4.getQuotaUsed()); + + // other level not applicable + Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY4_PATH); + QuotaUsageResponse quotaUsageResponse2 = + (QuotaUsageResponse) naResponse2.getEntity(); + assertEquals(ResponseStatus.TYPE_NOT_APPLICABLE, + quotaUsageResponse2.getResponseCode()); + + // invalid path request + Response invalidRes = nsSummaryEndpoint.getQuotaUsage(INVALID_PATH); + QuotaUsageResponse invalidResObj = + (QuotaUsageResponse) invalidRes.getEntity(); + assertEquals(ResponseStatus.PATH_NOT_FOUND, + invalidResObj.getResponseCode()); + } + + + @Test + public void testFileSizeDist() throws Exception { + checkFileSizeDist(ROOT_PATH, 2, 3, 3, 1); + checkFileSizeDist(VOL_PATH, 2, 1, 1, 1); + checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 0, 1); + } + + public void checkFileSizeDist(String path, int bin0, + int bin1, int bin2, int bin3) throws Exception { + Response res = nsSummaryEndpoint.getFileSizeDistribution(path); + FileSizeDistributionResponse fileSizeDistResObj = + (FileSizeDistributionResponse) res.getEntity(); + int[] fileSizeDist = fileSizeDistResObj.getFileSizeDist(); + assertEquals(bin0, fileSizeDist[0]); + assertEquals(bin1, fileSizeDist[1]); + assertEquals(bin2, fileSizeDist[2]); + assertEquals(bin3, fileSizeDist[3]); + for (int i = 4; i < ReconConstants.NUM_OF_FILE_SIZE_BINS; 
++i) { + assertEquals(0, fileSizeDist[i]); + } + } + + @Test + public void testNormalizePathUptoBucket() { + // Test null or empty path + assertEquals("/", OmUtils.normalizePathUptoBucket(null)); + assertEquals("/", OmUtils.normalizePathUptoBucket("")); + + // Test path with leading slashes + assertEquals("volume1/bucket1/key1/key2", + OmUtils.normalizePathUptoBucket("///volume1/bucket1/key1/key2")); + + // Test volume and bucket names + assertEquals("volume1/bucket1", + OmUtils.normalizePathUptoBucket("volume1/bucket1")); + + // Test with additional segments + assertEquals("volume1/bucket1/key1/key2", + OmUtils.normalizePathUptoBucket("volume1/bucket1/key1/key2")); + + // Test path with multiple slashes in key names. + assertEquals("volume1/bucket1/key1//key2", + OmUtils.normalizePathUptoBucket("volume1/bucket1/key1//key2")); + + // Test path with volume, bucket, and special characters in keys + assertEquals("volume/bucket/key$%#1/./////////key$%#2", + OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2")); + } + + @Test + public void testConstructFullPath() throws IOException { + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName(KEY_TWO) + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_TWO_OBJECT_ID) + .build(); + String fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + String expectedPath = "vol/bucket1/" + KEY_TWO; + Assertions.assertEquals(expectedPath, fullPath); + + keyInfo = new OmKeyInfo.Builder() + .setKeyName(KEY_FIVE) + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(KEY_FIVE_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol/bucket2/" + KEY_FIVE; + Assertions.assertEquals(expectedPath, fullPath); + + keyInfo = new OmKeyInfo.Builder() + .setKeyName(KEY_EIGHT) + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_THREE) + .setObjectID(KEY_EIGHT_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol2/bucket3/" + KEY_EIGHT; + Assertions.assertEquals(expectedPath, fullPath); + + + keyInfo = new OmKeyInfo.Builder() + .setKeyName(KEY_ELEVEN) + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_FOUR) + .setObjectID(KEY_ELEVEN_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); + expectedPath = "vol2/bucket4/" + KEY_ELEVEN; + Assertions.assertEquals(expectedPath, fullPath); + } + + + /** + * Testing the following case. + * └── vol + * ├── bucket1 (OBS) + * │ ├── file1 + * │ ├── file2 + * │ └── file3 + * └── bucket2 (OBS) + * ├── file4 + * └── file5 + * └── vol2 + * ├── bucket3 (Legacy) + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 (Legacy) + * └── file11 + * + * Write these keys to OM and + * replicate them. 
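+   * Keys under bucket1 and bucket2 are written with the OBJECT_STORE layout,
+   * keys under bucket3 and bucket4 with the LEGACY layout, so both NSSummary
+   * code paths are exercised against the same tree.
+   *
+   * Expected aggregates that follow from this layout:
+   *   BUCKET_ONE_DATA_SIZE   = file1 + file2 + file3
+   *   BUCKET_TWO_DATA_SIZE   = file4 + file5
+   *   BUCKET_THREE_DATA_SIZE = file8 + file9 + file10
+   *   BUCKET_FOUR_DATA_SIZE  = file11
+   *   VOL_DATA_SIZE          = bucket1 + bucket2
+   *   VOL_TWO_DATA_SIZE      = bucket3 + bucket4
+   *   ROOT_DATA_SIZE         = vol + vol2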
+ * @throws Exception + */ + @SuppressWarnings("checkstyle:MethodLength") + private void populateOMDB() throws Exception { + + // write all keys + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_ONE_SIZE, + getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_TWO_SIZE, + getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_THREE_SIZE, + getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + FILE_FOUR_SIZE, + getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + FILE_FIVE_SIZE, + getOBSBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_EIGHT, + BUCKET_THREE, + VOL_TWO, + KEY_EIGHT, + KEY_EIGHT_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_EIGHT_SIZE, + getLegacyBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_NINE, + BUCKET_THREE, + VOL_TWO, + KEY_NINE, + KEY_NINE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_NINE_SIZE, + getLegacyBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TEN, + BUCKET_THREE, + VOL_TWO, + KEY_TEN, + KEY_TEN_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_TEN_SIZE, + getLegacyBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_ELEVEN, + BUCKET_FOUR, + VOL_TWO, + KEY_ELEVEN, + KEY_ELEVEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FOUR_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_ELEVEN_SIZE, + getLegacyBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
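+   * (As written, this helper registers two volumes, vol and vol2, and four
+   * buckets, bucket1 through bucket4, all owned and administered by
+   * TestUser.)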
+ * + * @throws IOException ioEx + */ + private static OMMetadataManager initializeNewOmMetadataManager( + File omDbDir, OzoneConfiguration omConfiguration) + throws IOException { + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false"); + OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .setQuotaInBytes(VOL_QUOTA) + .build(); + + String volume2Key = omMetadataManager.getVolumeKey(VOL_TWO); + OmVolumeArgs args2 = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_TWO_OBJECT_ID) + .setVolume(VOL_TWO) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .setQuotaInBytes(VOL_TWO_QUOTA) + .build(); + + omMetadataManager.getVolumeTable().put(volumeKey, args); + omMetadataManager.getVolumeTable().put(volume2Key, args2); + + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setQuotaInBytes(BUCKET_ONE_QUOTA) + .setBucketLayout(getOBSBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setQuotaInBytes(BUCKET_TWO_QUOTA) + .setBucketLayout(getOBSBucketLayout()) + .build(); + + OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_THREE) + .setObjectID(BUCKET_THREE_OBJECT_ID) + .setQuotaInBytes(BUCKET_THREE_QUOTA) + .setBucketLayout(getLegacyBucketLayout()) + .build(); + + OmBucketInfo bucketInfo4 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_FOUR) + .setObjectID(BUCKET_FOUR_OBJECT_ID) + .setQuotaInBytes(BUCKET_FOUR_QUOTA) + .setBucketLayout(getLegacyBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo.getVolumeName(), bucketInfo.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + String bucketKey3 = omMetadataManager.getBucketKey( + bucketInfo3.getVolumeName(), bucketInfo3.getBucketName()); + String bucketKey4 = omMetadataManager.getBucketKey( + bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); + omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); + + return omMetadataManager; + } + + private void setUpMultiBlockKey() throws IOException { + OmKeyLocationInfoGroup locationInfoGroup = + getLocationInfoGroup1(); + + // add the multi-block key to Recon's OM + writeKeyToOm(reconOMMetadataManager, + MULTI_BLOCK_FILE, + BUCKET_ONE, + VOL, + MULTI_BLOCK_FILE, + MULTI_BLOCK_KEY_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup), + getOBSBucketLayout(), + FILE_THREE_SIZE); + } + + private OmKeyLocationInfoGroup getLocationInfoGroup1() { + List locationInfoList = new ArrayList<>(); + BlockID block1 = new BlockID(CONTAINER_ONE_ID, 0L); + BlockID block2 = new BlockID(CONTAINER_TWO_ID, 0L); + BlockID block3 = new BlockID(CONTAINER_THREE_ID, 0L); + + OmKeyLocationInfo 
location1 = new OmKeyLocationInfo.Builder() + .setBlockID(block1) + .setLength(BLOCK_ONE_LENGTH) + .build(); + OmKeyLocationInfo location2 = new OmKeyLocationInfo.Builder() + .setBlockID(block2) + .setLength(BLOCK_TWO_LENGTH) + .build(); + OmKeyLocationInfo location3 = new OmKeyLocationInfo.Builder() + .setBlockID(block3) + .setLength(BLOCK_THREE_LENGTH) + .build(); + locationInfoList.add(location1); + locationInfoList.add(location2); + locationInfoList.add(location3); + + return new OmKeyLocationInfoGroup(0L, locationInfoList); + } + + + private OmKeyLocationInfoGroup getLocationInfoGroup2() { + List locationInfoList = new ArrayList<>(); + BlockID block4 = new BlockID(CONTAINER_FOUR_ID, 0L); + BlockID block5 = new BlockID(CONTAINER_FIVE_ID, 0L); + BlockID block6 = new BlockID(CONTAINER_SIX_ID, 0L); + + OmKeyLocationInfo location4 = new OmKeyLocationInfo.Builder() + .setBlockID(block4) + .setLength(BLOCK_FOUR_LENGTH) + .build(); + OmKeyLocationInfo location5 = new OmKeyLocationInfo.Builder() + .setBlockID(block5) + .setLength(BLOCK_FIVE_LENGTH) + .build(); + OmKeyLocationInfo location6 = new OmKeyLocationInfo.Builder() + .setBlockID(block6) + .setLength(BLOCK_SIX_LENGTH) + .build(); + locationInfoList.add(location4); + locationInfoList.add(location5); + locationInfoList.add(location6); + return new OmKeyLocationInfoGroup(0L, locationInfoList); + + } + + @SuppressWarnings("checkstyle:MethodLength") + private void setUpMultiBlockReplicatedKeys() throws IOException { + OmKeyLocationInfoGroup locationInfoGroup1 = + getLocationInfoGroup1(); + OmKeyLocationInfoGroup locationInfoGroup2 = + getLocationInfoGroup2(); + + //vol/bucket1/file1 + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getOBSBucketLayout(), + FILE_ONE_SIZE); + + //vol/bucket1/file2 + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getOBSBucketLayout(), + FILE_TWO_SIZE); + + //vol/bucket1/file3 + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getOBSBucketLayout(), + FILE_THREE_SIZE); + + //vol/bucket2/file4 + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getOBSBucketLayout(), + FILE_FOUR_SIZE); + + //vol/bucket2/file5 + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getOBSBucketLayout(), + FILE_FIVE_SIZE); + + //vol2/bucket3/file8 + writeKeyToOm(reconOMMetadataManager, + KEY_EIGHT, + BUCKET_THREE, + VOL_TWO, + KEY_EIGHT, + KEY_EIGHT_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getLegacyBucketLayout(), + FILE_EIGHT_SIZE); + + //vol2/bucket3/file9 + writeKeyToOm(reconOMMetadataManager, + KEY_NINE, + BUCKET_THREE, + VOL_TWO, + KEY_NINE, + KEY_NINE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + 
BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getLegacyBucketLayout(), + FILE_NINE_SIZE); + + //vol2/bucket3/file10 + writeKeyToOm(reconOMMetadataManager, + KEY_TEN, + BUCKET_THREE, + VOL_TWO, + KEY_TEN, + KEY_TEN_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getLegacyBucketLayout(), + FILE_TEN_SIZE); + + //vol2/bucket4/file11 + writeKeyToOm(reconOMMetadataManager, + KEY_ELEVEN, + BUCKET_FOUR, + VOL_TWO, + KEY_ELEVEN, + KEY_ELEVEN_OBJECT_ID, + BUCKET_FOUR_OBJECT_ID, + BUCKET_FOUR_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getLegacyBucketLayout(), + FILE_ELEVEN_SIZE); + } + + /** + * Generate a set of mock container replica with a size of + * replication factor for container. + * + * @param replicationFactor number of replica + * @param containerID the container replicated based upon + * @return a set of container replica for testing + */ + private static Set generateMockContainerReplicas( + int replicationFactor, ContainerID containerID) { + Set result = new HashSet<>(); + for (int i = 0; i < replicationFactor; ++i) { + DatanodeDetails randomDatanode = randomDatanodeDetails(); + ContainerReplica replica = new ContainerReplica.ContainerReplicaBuilder() + .setContainerID(containerID) + .setContainerState( + StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN) + .setDatanodeDetails(randomDatanode) + .build(); + result.add(replica); + } + return result; + } + + private static ReconStorageContainerManagerFacade getMockReconSCM() + throws ContainerNotFoundException { + ReconStorageContainerManagerFacade reconSCM = + mock(ReconStorageContainerManagerFacade.class); + ContainerManager containerManager = mock(ContainerManager.class); + + // Container 1 is 3-way replicated + ContainerID containerID1 = new ContainerID(CONTAINER_ONE_ID); + Set containerReplicas1 = generateMockContainerReplicas( + CONTAINER_ONE_REPLICA_COUNT, containerID1); + when(containerManager.getContainerReplicas(containerID1)) + .thenReturn(containerReplicas1); + + // Container 2 is under replicated with 2 replica + ContainerID containerID2 = new ContainerID(CONTAINER_TWO_ID); + Set containerReplicas2 = generateMockContainerReplicas( + CONTAINER_TWO_REPLICA_COUNT, containerID2); + when(containerManager.getContainerReplicas(containerID2)) + .thenReturn(containerReplicas2); + + // Container 3 is over replicated with 4 replica + ContainerID containerID3 = new ContainerID(CONTAINER_THREE_ID); + Set containerReplicas3 = generateMockContainerReplicas( + CONTAINER_THREE_REPLICA_COUNT, containerID3); + when(containerManager.getContainerReplicas(containerID3)) + .thenReturn(containerReplicas3); + + // Container 4 is replicated with 5 replica + ContainerID containerID4 = new ContainerID(CONTAINER_FOUR_ID); + Set containerReplicas4 = generateMockContainerReplicas( + CONTAINER_FOUR_REPLICA_COUNT, containerID4); + when(containerManager.getContainerReplicas(containerID4)) + .thenReturn(containerReplicas4); + + // Container 5 is replicated with 2 replica + ContainerID containerID5 = new ContainerID(CONTAINER_FIVE_ID); + Set containerReplicas5 = generateMockContainerReplicas( + CONTAINER_FIVE_REPLICA_COUNT, containerID5); + when(containerManager.getContainerReplicas(containerID5)) + .thenReturn(containerReplicas5); + + // Container 6 is replicated with 3 replica + ContainerID containerID6 = new ContainerID(CONTAINER_SIX_ID); + Set containerReplicas6 = 
generateMockContainerReplicas( + CONTAINER_SIX_REPLICA_COUNT, containerID6); + when(containerManager.getContainerReplicas(containerID6)) + .thenReturn(containerReplicas6); + + when(reconSCM.getContainerManager()).thenReturn(containerManager); + ReconNodeManager mockReconNodeManager = mock(ReconNodeManager.class); + when(mockReconNodeManager.getStats()).thenReturn(getMockSCMRootStat()); + when(reconSCM.getScmNodeManager()).thenReturn(mockReconNodeManager); + return reconSCM; + } + + private static BucketLayout getOBSBucketLayout() { + return BucketLayout.OBJECT_STORE; + } + + private static BucketLayout getLegacyBucketLayout() { + return BucketLayout.LEGACY; + } + + private static SCMNodeStat getMockSCMRootStat() { + return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, + ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java index 8a9452a86297..f64d93707a2c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java @@ -67,8 +67,8 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyString; @@ -329,16 +329,14 @@ public void setUp() throws Exception { .addStorageReport(storageReportProto1) .addStorageReport(storageReportProto2).build(); - try { + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() - .register(extendedDatanodeDetailsProto, nodeReportProto, - containerReportsProto, pipelineReportsProto, - defaultLayoutVersionProto()); + .register(extendedDatanodeDetailsProto, nodeReportProto, + containerReportsProto, pipelineReportsProto, + defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); } @Test @@ -421,16 +419,14 @@ private void updateContainerReport(long containerId) { .setOriginNodeId(datanodeId) .build()) .build(); - try { + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() - .register(extendedDatanodeDetailsProto, nodeReportProto, - containerReportsProto, pipelineReportsProto, - defaultLayoutVersionProto()); + .register(extendedDatanodeDetailsProto, nodeReportProto, + containerReportsProto, pipelineReportsProto, + defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); } private void waitAndCheckConditionAfterHeartbeat(Callable check) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java index 753804e5fab0..8b35bfdd4d2a 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java @@ -38,8 +38,8 @@ import javax.ws.rs.core.Response; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -63,10 +63,14 @@ private OmPrefixInfo getOmPrefixInfoForTest( String identityString, IAccessAuthorizer.ACLType aclType, OzoneAcl.AclScope scope) { - return new OmPrefixInfo(path, - Collections.singletonList(new OzoneAcl( + return OmPrefixInfo.newBuilder() + .setName(path) + .setAcls(new ArrayList<>(Collections.singletonList(new OzoneAcl( identityType, identityString, - aclType, scope)), new HashMap<>(), 10, 100); + scope, aclType)))) + .setObjectID(10) + .setUpdateID(100) + .build(); } public void testNSSummaryBasicInfoRoot( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 371fb6f9d675..8647639dd134 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.fsck; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.assertj.core.api.Assertions.assertThat; import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -38,6 +39,7 @@ import java.util.UUID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicatedReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.TestContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; @@ -110,38 +113,61 @@ public void testRun() throws Exception { when(scmClientMock.getContainerWithPipeline(c.getContainerID())) .thenReturn(new ContainerWithPipeline(c, null)); } + + ReplicatedReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(THREE); // Under replicated - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) + ContainerInfo containerInfo1 = + TestContainerInfo.newBuilderForTest().setContainerID(1).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(1L))).thenReturn(containerInfo1); + when(containerManagerMock.getContainerReplicas(containerInfo1.containerID())) .thenReturn(getMockReplicas(1L, State.CLOSED, State.UNHEALTHY)); // return all UNHEALTHY replicas for container ID 2 -> UNDER_REPLICATED - 
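+    // Only UNHEALTHY replicas means zero healthy copies are counted, which is
+    // why container 2 is classified as UNDER_REPLICATED rather than healthy.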
when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L))) + ContainerInfo containerInfo2 = + TestContainerInfo.newBuilderForTest().setContainerID(2).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(2L))).thenReturn(containerInfo2); + when(containerManagerMock.getContainerReplicas(containerInfo2.containerID())) .thenReturn(getMockReplicas(2L, State.UNHEALTHY)); - // return 0 replicas for container ID 3 -> Missing - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) + // return 0 replicas for container ID 3 -> Empty Missing + ContainerInfo containerInfo3 = + TestContainerInfo.newBuilderForTest().setContainerID(3).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(3L))).thenReturn(containerInfo3); + when(containerManagerMock.getContainerReplicas(containerInfo3.containerID())) .thenReturn(Collections.emptySet()); // Return 5 Healthy -> Over replicated - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L))) + ContainerInfo containerInfo4 = + TestContainerInfo.newBuilderForTest().setContainerID(4).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(4L))).thenReturn(containerInfo4); + when(containerManagerMock.getContainerReplicas(containerInfo4.containerID())) .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED)); // Mis-replicated + ContainerInfo containerInfo5 = + TestContainerInfo.newBuilderForTest().setContainerID(5).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(5L))).thenReturn(containerInfo5); Set misReplicas = getMockReplicas(5L, State.CLOSED, State.CLOSED, State.CLOSED); placementMock.setMisRepWhenDnPresent( misReplicas.iterator().next().getDatanodeDetails().getUuid()); - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(5L))) + when(containerManagerMock.getContainerReplicas(containerInfo5.containerID())) .thenReturn(misReplicas); // Return 3 Healthy -> Healthy container - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(6L))) + ContainerInfo containerInfo6 = + TestContainerInfo.newBuilderForTest().setContainerID(6).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(6L))).thenReturn(containerInfo6); + when(containerManagerMock.getContainerReplicas(containerInfo6.containerID())) .thenReturn(getMockReplicas(6L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 7 -> EMPTY_MISSING - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(7L))) + // return 0 replicas for container ID 7 -> MISSING + ContainerInfo containerInfo7 = + TestContainerInfo.newBuilderForTest().setContainerID(7).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(7L))).thenReturn(containerInfo7); + when(containerManagerMock.getContainerReplicas(containerInfo7.containerID())) .thenReturn(Collections.emptySet()); List all = unHealthyContainersTableHandle.findAll(); @@ -150,7 +176,7 @@ public void testRun() throws Exception { long currentTime = System.currentTimeMillis(); ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); - reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); + 
reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(5)); when(reconContainerMetadataManager.getKeyCountForContainer( 7L)).thenReturn(5L); ContainerHealthTask containerHealthTask = @@ -215,7 +241,7 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(2L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 3 -> Still Missing + // return 0 replicas for container ID 3 -> Still empty Missing when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) .thenReturn(Collections.emptySet()); @@ -227,7 +253,7 @@ public void testRun() throws Exception { // Was mis-replicated - make it healthy now placementMock.setMisRepWhenDnPresent(null); - LambdaTestUtils.await(6000, 1000, () -> + LambdaTestUtils.await(60000, 1000, () -> (unHealthyContainersTableHandle.count() == 4)); rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); @@ -252,6 +278,21 @@ public void testRun() throws Exception { // This container is now healthy, it should not be in the table any more assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(5L).size()); + + // Again make container Id 7 as empty which was missing as well, so in next + // container health task run, this container also should be deleted from + // UNHEALTHY_CONTAINERS table because we want to cleanup any existing + // EMPTY and MISSING containers from UNHEALTHY_CONTAINERS table. + when(reconContainerMetadataManager.getKeyCountForContainer(7L)).thenReturn(0L); + LambdaTestUtils.await(6000, 1000, () -> { + UnhealthyContainers emptyMissingContainer = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); + return ("EMPTY_MISSING".equals(emptyMissingContainer.getContainerState())); + }); + + // Just check once again that count doesn't change, only state of + // container 7 changes from MISSING to EMPTY_MISSING + LambdaTestUtils.await(60000, 1000, () -> + (unHealthyContainersTableHandle.count() == 4)); } @Test @@ -343,6 +384,65 @@ public void testDeletedContainer() throws Exception { .isGreaterThan(currentTime); } + @Test + public void testNegativeSizeContainers() throws Exception { + // Setup mock objects and test environment + UnhealthyContainersDao unhealthyContainersDao = + getDao(UnhealthyContainersDao.class); + ContainerHealthSchemaManager containerHealthSchemaManager = + new ContainerHealthSchemaManager( + getSchemaDefinition(ContainerSchemaDefinition.class), + unhealthyContainersDao); + ReconStorageContainerManagerFacade scmMock = + mock(ReconStorageContainerManagerFacade.class); + ContainerManager containerManagerMock = mock(ContainerManager.class); + StorageContainerServiceProvider scmClientMock = + mock(StorageContainerServiceProvider.class); + ReconContainerMetadataManager reconContainerMetadataManager = + mock(ReconContainerMetadataManager.class); + MockPlacementPolicy placementMock = new MockPlacementPolicy(); + + // Mock container info setup + List mockContainers = getMockContainers(3); + when(scmMock.getContainerManager()).thenReturn(containerManagerMock); + when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); + when(containerManagerMock.getContainers(any(ContainerID.class), + anyInt())).thenReturn(mockContainers); + for (ContainerInfo c : mockContainers) { + when(containerManagerMock.getContainer( + c.containerID())).thenReturn(c); + when(scmClientMock.getContainerWithPipeline( + c.getContainerID())).thenReturn(new ContainerWithPipeline(c, null)); + 
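+      // Stub each mocked container to report negative used bytes so the
+      // health task records all three under the NEGATIVE_SIZE state asserted
+      // at the end of this test.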
when(containerManagerMock.getContainer(c.containerID()) + .getUsedBytes()).thenReturn(Long.valueOf(-10)); + } + + // Verify the table is initially empty + assertThat(unhealthyContainersDao.findAll()).isEmpty(); + + // Setup and start the container health task + ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); + ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); + reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); + ContainerHealthTask containerHealthTask = new ContainerHealthTask( + scmMock.getContainerManager(), scmMock.getScmServiceProvider(), + reconTaskStatusDao, + containerHealthSchemaManager, placementMock, reconTaskConfig, + reconContainerMetadataManager, + new OzoneConfiguration()); + containerHealthTask.start(); + + // Wait for the task to identify unhealthy containers + LambdaTestUtils.await(6000, 1000, + () -> unhealthyContainersDao.count() == 3); + + // Assert that all unhealthy containers have been identified as NEGATIVE_SIZE states + List negativeSizeContainers = + unhealthyContainersDao.fetchByContainerState("NEGATIVE_SIZE"); + assertThat(negativeSizeContainers).hasSize(3); + } + + private Set getMockReplicas( long containerId, State...states) { Set replicas = new HashSet<>(); @@ -364,9 +464,9 @@ private List getMockContainers(int num) { when(c.getContainerID()).thenReturn((long)i); when(c.getReplicationConfig()) .thenReturn(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.THREE)); + THREE)); when(c.getReplicationFactor()) - .thenReturn(HddsProtos.ReplicationFactor.THREE); + .thenReturn(THREE); when(c.getState()).thenReturn(HddsProtos.LifeCycleState.CLOSED); when(c.containerID()).thenReturn(ContainerID.valueOf(i)); containers.add(c); @@ -379,7 +479,7 @@ private ContainerInfo getMockDeletedContainer(int containerID) { when(c.getContainerID()).thenReturn((long)containerID); when(c.getReplicationConfig()) .thenReturn(RatisReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.THREE)); + .getInstance(THREE)); when(c.containerID()).thenReturn(ContainerID.valueOf(containerID)); when(c.getState()).thenReturn(HddsProtos.LifeCycleState.DELETED); return c; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/heatmap/TestHeatMapInfo.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/heatmap/TestHeatMapInfo.java index 856b556e8af2..40d7cbd47753 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/heatmap/TestHeatMapInfo.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/heatmap/TestHeatMapInfo.java @@ -18,11 +18,10 @@ package org.apache.hadoop.ozone.recon.heatmap; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import com.google.gson.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; +import org.apache.hadoop.hdds.JsonTestUtils; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.api.types.EntityMetaData; import org.apache.hadoop.ozone.recon.api.types.EntityReadAccessHeatMapResponse; @@ -745,35 +744,39 @@ public void setUp() throws Exception { public void testHeatMapGeneratedInfo() throws IOException { // Setup // Run the test - JsonElement jsonElement = JsonParser.parseString(auditRespStr); - JsonObject jsonObject = jsonElement.getAsJsonObject(); - JsonElement facets = 
jsonObject.get("facets"); - JsonObject facetsBucketsObject = - facets.getAsJsonObject().get("resources") - .getAsJsonObject(); - ObjectMapper objectMapper = new ObjectMapper(); + // Parse the JSON string to JsonNode + JsonNode rootNode = JsonUtils.readTree(auditRespStr); + JsonNode facetsNode = rootNode.path("facets"); + JsonNode resourcesNode = facetsNode.path("resources"); + + // Deserialize the resources node directly if it's not missing HeatMapProviderDataResource auditLogFacetsResources = - objectMapper.readValue( - facetsBucketsObject.toString(), HeatMapProviderDataResource.class); - EntityMetaData[] entities = auditLogFacetsResources.getMetaDataList(); - List entityMetaDataList = - Arrays.stream(entities).collect(Collectors.toList()); - EntityReadAccessHeatMapResponse entityReadAccessHeatMapResponse = - heatMapUtil.generateHeatMap(entityMetaDataList); - assertThat(entityReadAccessHeatMapResponse.getChildren().size()).isGreaterThan(0); - assertEquals(12, entityReadAccessHeatMapResponse.getChildren().size()); - assertEquals(25600, entityReadAccessHeatMapResponse.getSize()); - assertEquals(2924, entityReadAccessHeatMapResponse.getMinAccessCount()); - assertEquals(155074, entityReadAccessHeatMapResponse.getMaxAccessCount()); - assertEquals("root", entityReadAccessHeatMapResponse.getLabel()); - assertEquals(0.0, entityReadAccessHeatMapResponse.getChildren().get(0).getColor()); - assertEquals(0.442, - entityReadAccessHeatMapResponse.getChildren().get(0).getChildren() - .get(0).getChildren().get(1).getColor()); - assertEquals(0.058, - entityReadAccessHeatMapResponse.getChildren().get(0).getChildren() - .get(1).getChildren().get(3).getColor()); + JsonTestUtils.treeToValue(resourcesNode, HeatMapProviderDataResource.class); + + if (auditLogFacetsResources != null) { + EntityMetaData[] entities = auditLogFacetsResources.getMetaDataList(); + List entityMetaDataList = + Arrays.stream(entities).collect(Collectors.toList()); + EntityReadAccessHeatMapResponse entityReadAccessHeatMapResponse = + heatMapUtil.generateHeatMap(entityMetaDataList); + assertThat( + entityReadAccessHeatMapResponse.getChildren().size()).isGreaterThan( + 0); + assertEquals(12, entityReadAccessHeatMapResponse.getChildren().size()); + assertEquals(25600, entityReadAccessHeatMapResponse.getSize()); + assertEquals(2924, entityReadAccessHeatMapResponse.getMinAccessCount()); + assertEquals(155074, entityReadAccessHeatMapResponse.getMaxAccessCount()); + assertEquals("root", entityReadAccessHeatMapResponse.getLabel()); + assertEquals(0.0, + entityReadAccessHeatMapResponse.getChildren().get(0).getColor()); + assertEquals(0.442, + entityReadAccessHeatMapResponse.getChildren().get(0).getChildren() + .get(0).getChildren().get(1).getColor()); + assertEquals(0.058, + entityReadAccessHeatMapResponse.getChildren().get(0).getChildren() + .get(1).getChildren().get(3).getColor()); + } } @Test @@ -831,54 +834,51 @@ public void testHeatMapInfoResponseWithEntityTypeVolume() throws IOException { " }\n" + " }\n" + "}"; - JsonElement jsonElement = - JsonParser.parseString(auditRespStrWithVolumeEntityType); - JsonObject jsonObject = jsonElement.getAsJsonObject(); - JsonElement facets = jsonObject.get("facets"); - JsonElement resources = facets.getAsJsonObject().get("resources"); - JsonObject facetsBucketsObject = new JsonObject(); - if (null != resources) { - facetsBucketsObject = resources.getAsJsonObject(); - } - ObjectMapper objectMapper = new ObjectMapper(); + JsonNode rootNode = JsonUtils.readTree(auditRespStrWithVolumeEntityType); + JsonNode 
facetsNode = rootNode.path("facets"); + JsonNode resourcesNode = facetsNode.path("resources"); + + // Deserialize the resources node directly if it's not missing HeatMapProviderDataResource auditLogFacetsResources = - objectMapper.readValue( - facetsBucketsObject.toString(), HeatMapProviderDataResource.class); - EntityMetaData[] entities = auditLogFacetsResources.getMetaDataList(); - if (null != entities && entities.length > 0) { - List entityMetaDataList = - Arrays.stream(entities).collect(Collectors.toList()); - // Below heatmap response would be of format like: - //{ - // "label": "root", - // "path": "/", - // "children": [ - // { - // "label": "s3v", - // "path": "s3v", - // "size": 256 - // }, - // { - // "label": "testnewvol2", - // "path": "testnewvol2", - // "size": 256 - // } - // ], - // "size": 512, - // "minAccessCount": 19263 - //} - EntityReadAccessHeatMapResponse entityReadAccessHeatMapResponse = - heatMapUtil.generateHeatMap(entityMetaDataList); - assertThat(entityReadAccessHeatMapResponse.getChildren().size()).isGreaterThan(0); - assertEquals(2, entityReadAccessHeatMapResponse.getChildren().size()); - assertEquals(512, entityReadAccessHeatMapResponse.getSize()); - assertEquals(8590, entityReadAccessHeatMapResponse.getMinAccessCount()); - assertEquals(19263, entityReadAccessHeatMapResponse.getMaxAccessCount()); - assertEquals(1.0, entityReadAccessHeatMapResponse.getChildren().get(0).getColor()); - assertEquals("root", entityReadAccessHeatMapResponse.getLabel()); - } else { - assertNull(entities); + JsonTestUtils.treeToValue(resourcesNode, HeatMapProviderDataResource.class); + + if (auditLogFacetsResources != null) { + EntityMetaData[] entities = auditLogFacetsResources.getMetaDataList(); + if (null != entities && entities.length > 0) { + List entityMetaDataList = + Arrays.stream(entities).collect(Collectors.toList()); + // Below heatmap response would be of format like: + //{ + // "label": "root", + // "path": "/", + // "children": [ + // { + // "label": "s3v", + // "path": "s3v", + // "size": 256 + // }, + // { + // "label": "testnewvol2", + // "path": "testnewvol2", + // "size": 256 + // } + // ], + // "size": 512, + // "minAccessCount": 19263 + //} + EntityReadAccessHeatMapResponse entityReadAccessHeatMapResponse = + heatMapUtil.generateHeatMap(entityMetaDataList); + assertThat(entityReadAccessHeatMapResponse.getChildren().size()).isGreaterThan(0); + assertEquals(2, entityReadAccessHeatMapResponse.getChildren().size()); + assertEquals(512, entityReadAccessHeatMapResponse.getSize()); + assertEquals(8590, entityReadAccessHeatMapResponse.getMinAccessCount()); + assertEquals(19263, entityReadAccessHeatMapResponse.getMaxAccessCount()); + assertEquals(1.0, entityReadAccessHeatMapResponse.getChildren().get(0).getColor()); + assertEquals("root", entityReadAccessHeatMapResponse.getLabel()); + } else { + assertNull(entities); + } } } @@ -965,150 +965,150 @@ public void testHeatMapInfoResponseWithEntityTypeBucket() throws IOException { " }\n" + " }\n" + "}"; - JsonElement jsonElement = - JsonParser.parseString(auditRespStrWithPathAndBucketEntityType); - JsonObject jsonObject = jsonElement.getAsJsonObject(); - JsonElement facets = jsonObject.get("facets"); - JsonElement resources = facets.getAsJsonObject().get("resources"); - JsonObject facetsBucketsObject = new JsonObject(); - if (null != resources) { - facetsBucketsObject = resources.getAsJsonObject(); - } - ObjectMapper objectMapper = new ObjectMapper(); - HeatMapProviderDataResource auditLogFacetsResources = - 
objectMapper.readValue( - facetsBucketsObject.toString(), HeatMapProviderDataResource.class); - EntityMetaData[] entities = auditLogFacetsResources.getMetaDataList(); - if (null != entities && entities.length > 0) { - List entityMetaDataList = - Arrays.stream(entities).collect(Collectors.toList()); - // Below heatmap response would be of format like: - //{ - // "label": "root", - // "path": "/", - // "children": [ - // { - // "label": "testnewvol2", - // "path": "testnewvol2", - // "children": [ - // { - // "label": "fsobuck11", - // "path": "/testnewvol2/fsobuck11", - // "children": [ - // { - // "label": "", - // "path": "/testnewvol2/fsobuck11/", - // "size": 100, - // "accessCount": 701, - // "color": 1.0 - // } - // ], - // "size": 100, - // "minAccessCount": 701, - // "maxAccessCount": 701 - // }, - // { - // "label": "fsobuck12", - // "path": "/testnewvol2/fsobuck12", - // "children": [ - // { - // "label": "", - // "path": "/testnewvol2/fsobuck12/", - // "size": 100, - // "accessCount": 701, - // "color": 1.0 - // } - // ], - // "size": 100, - // "minAccessCount": 701, - // "maxAccessCount": 701 - // }, - // { - // "label": "fsobuck13", - // "path": "/testnewvol2/fsobuck13", - // "children": [ - // { - // "label": "", - // "path": "/testnewvol2/fsobuck13/", - // "size": 100, - // "accessCount": 701, - // "color": 1.0 - // } - // ], - // "size": 100, - // "minAccessCount": 701, - // "maxAccessCount": 701 - // }, - // { - // "label": "obsbuck11", - // "path": "/testnewvol2/obsbuck11", - // "children": [ - // { - // "label": "", - // "path": "/testnewvol2/obsbuck11/", - // "size": 107, - // "accessCount": 263, - // "color": 1.0 - // } - // ], - // "size": 107, - // "minAccessCount": 263, - // "maxAccessCount": 263 - // }, - // { - // "label": "obsbuck12", - // "path": "/testnewvol2/obsbuck12", - // "children": [ - // { - // "label": "", - // "path": "/testnewvol2/obsbuck12/", - // "size": 100, - // "accessCount": 200, - // "color": 1.0 - // } - // ], - // "size": 100, - // "minAccessCount": 200, - // "maxAccessCount": 200 - // }, - // { - // "label": "obsbuck13", - // "path": "/testnewvol2/obsbuck13", - // "children": [ - // { - // "label": "", - // "path": "/testnewvol2/obsbuck13/", - // "size": 100, - // "accessCount": 200, - // "color": 1.0 - // } - // ], - // "size": 100, - // "minAccessCount": 200, - // "maxAccessCount": 200 - // } - // ], - // "size": 607 - // } - // ], - // "size": 607, - // "minAccessCount": 200, - // "maxAccessCount": 701 - //} - EntityReadAccessHeatMapResponse entityReadAccessHeatMapResponse = - heatMapUtil.generateHeatMap(entityMetaDataList); - assertThat(entityReadAccessHeatMapResponse.getChildren().size()).isGreaterThan(0); - assertEquals(2, - entityReadAccessHeatMapResponse.getChildren().size()); - assertEquals(0.0, - entityReadAccessHeatMapResponse.getChildren().get(0).getColor()); - String path = - entityReadAccessHeatMapResponse.getChildren().get(1).getChildren() - .get(0).getPath(); - assertEquals("/testnewvol2/fsobuck11", path); - } else { - assertNull(entities); + JsonNode rootNode = JsonUtils.readTree(auditRespStrWithPathAndBucketEntityType); + // Navigate to the nested JSON objects + JsonNode facetsNode = rootNode.path("facets"); + JsonNode resourcesNode = facetsNode.path("resources"); + // Deserialize the resources node directly if it's not missing + HeatMapProviderDataResource auditLogFacetsResources = null; + auditLogFacetsResources = + JsonTestUtils.treeToValue(resourcesNode, HeatMapProviderDataResource.class); + + if (auditLogFacetsResources 
!= null) { + EntityMetaData[] entities = auditLogFacetsResources.getMetaDataList(); + if (null != entities && entities.length > 0) { + List entityMetaDataList = + Arrays.stream(entities).collect(Collectors.toList()); + // Below heatmap response would be of format like: + //{ + // "label": "root", + // "path": "/", + // "children": [ + // { + // "label": "testnewvol2", + // "path": "testnewvol2", + // "children": [ + // { + // "label": "fsobuck11", + // "path": "/testnewvol2/fsobuck11", + // "children": [ + // { + // "label": "", + // "path": "/testnewvol2/fsobuck11/", + // "size": 100, + // "accessCount": 701, + // "color": 1.0 + // } + // ], + // "size": 100, + // "minAccessCount": 701, + // "maxAccessCount": 701 + // }, + // { + // "label": "fsobuck12", + // "path": "/testnewvol2/fsobuck12", + // "children": [ + // { + // "label": "", + // "path": "/testnewvol2/fsobuck12/", + // "size": 100, + // "accessCount": 701, + // "color": 1.0 + // } + // ], + // "size": 100, + // "minAccessCount": 701, + // "maxAccessCount": 701 + // }, + // { + // "label": "fsobuck13", + // "path": "/testnewvol2/fsobuck13", + // "children": [ + // { + // "label": "", + // "path": "/testnewvol2/fsobuck13/", + // "size": 100, + // "accessCount": 701, + // "color": 1.0 + // } + // ], + // "size": 100, + // "minAccessCount": 701, + // "maxAccessCount": 701 + // }, + // { + // "label": "obsbuck11", + // "path": "/testnewvol2/obsbuck11", + // "children": [ + // { + // "label": "", + // "path": "/testnewvol2/obsbuck11/", + // "size": 107, + // "accessCount": 263, + // "color": 1.0 + // } + // ], + // "size": 107, + // "minAccessCount": 263, + // "maxAccessCount": 263 + // }, + // { + // "label": "obsbuck12", + // "path": "/testnewvol2/obsbuck12", + // "children": [ + // { + // "label": "", + // "path": "/testnewvol2/obsbuck12/", + // "size": 100, + // "accessCount": 200, + // "color": 1.0 + // } + // ], + // "size": 100, + // "minAccessCount": 200, + // "maxAccessCount": 200 + // }, + // { + // "label": "obsbuck13", + // "path": "/testnewvol2/obsbuck13", + // "children": [ + // { + // "label": "", + // "path": "/testnewvol2/obsbuck13/", + // "size": 100, + // "accessCount": 200, + // "color": 1.0 + // } + // ], + // "size": 100, + // "minAccessCount": 200, + // "maxAccessCount": 200 + // } + // ], + // "size": 607 + // } + // ], + // "size": 607, + // "minAccessCount": 200, + // "maxAccessCount": 701 + //} + EntityReadAccessHeatMapResponse entityReadAccessHeatMapResponse = + heatMapUtil.generateHeatMap(entityMetaDataList); + assertThat( + entityReadAccessHeatMapResponse.getChildren().size()).isGreaterThan( + 0); + assertEquals(2, + entityReadAccessHeatMapResponse.getChildren().size()); + assertEquals(0.0, + entityReadAccessHeatMapResponse.getChildren().get(0).getColor()); + String path = + entityReadAccessHeatMapResponse.getChildren().get(1).getChildren() + .get(0).getPath(); + assertEquals("/testnewvol2/fsobuck11", path); + } else { + assertNull(entities); + } } } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java index efde79f9bacb..3c572aa8e052 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java @@ -43,6 +43,7 @@ import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopology; @@ -55,6 +56,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; +import org.apache.hadoop.ozone.recon.TestReconUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -165,6 +167,31 @@ public void testProcessICRStateMismatch() } } + @Test + public void testMergeMultipleICRs() { + final ContainerInfo container = TestReconUtils.getContainer(LifeCycleState.OPEN); + final DatanodeDetails datanodeOne = randomDatanodeDetails(); + final IncrementalContainerReportProto containerReport = + getIncrementalContainerReportProto(container.containerID(), + ContainerReplicaProto.State.CLOSED, + datanodeOne.getUuidString()); + final IncrementalContainerReportFromDatanode icrFromDatanode1 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + final IncrementalContainerReportFromDatanode icrFromDatanode2 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + assertEquals(1, icrFromDatanode1.getReport().getReportList().size()); + icrFromDatanode1.mergeReport(icrFromDatanode2); + assertEquals(2, icrFromDatanode1.getReport().getReportList().size()); + + final IncrementalContainerReportFromDatanode icrFromDatanode3 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + icrFromDatanode1.mergeReport(icrFromDatanode3); + assertEquals(3, icrFromDatanode1.getReport().getReportList().size()); + } + private LifeCycleState getContainerStateFromReplicaState( State state) { switch (state) { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java index 99bb482cb51e..02207f9c6209 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java @@ -23,8 +23,11 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.ratis.util.Preconditions.assertTrue; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.mockito.Mockito.eq; @@ -39,6 +42,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; @@ -48,9 +52,11 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand; +import org.apache.hadoop.ozone.recon.ReconContext; import org.apache.hadoop.ozone.recon.ReconUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -69,16 +75,19 @@ public class TestReconNodeManager { private DBStore store; private ReconStorageConfig reconStorageConfig; private HDDSLayoutVersionManager versionManager; + private ReconContext reconContext; @BeforeEach public void setUp() throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_DIRS, temporaryFolder.toAbsolutePath().toString()); conf.set(OZONE_SCM_NAMES, "localhost"); - reconStorageConfig = new ReconStorageConfig(conf, new ReconUtils()); + ReconUtils reconUtils = new ReconUtils(); + reconStorageConfig = new ReconStorageConfig(conf, reconUtils); versionManager = new HDDSLayoutVersionManager( reconStorageConfig.getLayoutVersion()); store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + reconContext = new ReconContext(conf, reconUtils); } @AfterEach @@ -86,6 +95,37 @@ public void tearDown() throws Exception { store.close(); } + @Test + public void testReconNodeManagerInitWithInvalidNetworkTopology() throws IOException { + ReconUtils reconUtils = new ReconUtils(); + ReconStorageConfig scmStorageConfig = + new ReconStorageConfig(conf, reconUtils); + EventQueue eventQueue = new EventQueue(); + NetworkTopology clusterMap = new NetworkTopologyImpl(conf); + Table nodeTable = + ReconSCMDBDefinition.NODES.getTable(store); + ReconNodeManager reconNodeManager = new ReconNodeManager(conf, + scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager, reconContext); + assertThat(reconNodeManager.getAllNodes()).isEmpty(); + + DatanodeDetails datanodeDetails = randomDatanodeDetails(); + // Updating the node's topology depth to make it invalid. + datanodeDetails.setNetworkLocation("/default-rack/xyz/"); + String uuidString = datanodeDetails.getUuidString(); + + // Register a random datanode. 
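+    // Recon is expected to reject this registration (errorNodeNotPermitted),
+    // flag ReconContext unhealthy with INVALID_NETWORK_TOPOLOGY, and keep the node out of its node table.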
+ RegisteredCommand register = reconNodeManager.register(datanodeDetails, null, null); + assertNotNull(register); + assertEquals(StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.errorNodeNotPermitted, + register.getError()); + assertEquals(reconContext.getClusterId(), register.getClusterID()); + assertFalse(reconContext.isHealthy().get()); + assertTrue(reconContext.getErrors().get(0).equals(ReconContext.ErrorCode.INVALID_NETWORK_TOPOLOGY)); + + assertEquals(0, reconNodeManager.getAllNodes().size()); + assertNull(reconNodeManager.getNodeByUuid(uuidString)); + } + @Test public void testReconNodeDB() throws IOException, NodeNotFoundException { ReconStorageConfig scmStorageConfig = @@ -95,7 +135,7 @@ public void testReconNodeDB() throws IOException, NodeNotFoundException { Table nodeTable = ReconSCMDBDefinition.NODES.getTable(store); ReconNodeManager reconNodeManager = new ReconNodeManager(conf, - scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager); + scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager, reconContext); ReconNewNodeHandler reconNewNodeHandler = new ReconNewNodeHandler(reconNodeManager); assertThat(reconNodeManager.getAllNodes()).isEmpty(); @@ -162,7 +202,7 @@ public void testReconNodeDB() throws IOException, NodeNotFoundException { eventQueue.close(); reconNodeManager.close(); reconNodeManager = new ReconNodeManager(conf, scmStorageConfig, eventQueue, - clusterMap, nodeTable, versionManager); + clusterMap, nodeTable, versionManager, reconContext); // Verify that the node information was persisted and loaded back. assertEquals(1, reconNodeManager.getAllNodes().size()); @@ -179,7 +219,7 @@ public void testUpdateNodeOperationalStateFromScm() throws Exception { Table nodeTable = ReconSCMDBDefinition.NODES.getTable(store); ReconNodeManager reconNodeManager = new ReconNodeManager(conf, - scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager); + scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager, reconContext); DatanodeDetails datanodeDetails = randomDatanodeDetails(); @@ -213,7 +253,7 @@ public void testDatanodeUpdate() throws IOException { Table nodeTable = ReconSCMDBDefinition.NODES.getTable(store); ReconNodeManager reconNodeManager = new ReconNodeManager(conf, - scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager); + scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager, reconContext); ReconNewNodeHandler reconNewNodeHandler = new ReconNewNodeHandler(reconNodeManager); assertThat(reconNodeManager.getAllNodes()).isEmpty(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java index d15cd6142d3c..032bff80ade3 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java @@ -297,8 +297,8 @@ public void testGetAndApplyDeltaUpdatesFromOM( OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); assertEquals(4.0, - metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0); - assertEquals(1, metrics.getNumNonZeroDeltaRequests().value()); + metrics.getAverageNumUpdatesInDeltaRequest(), 0.0); + assertEquals(1, metrics.getNumNonZeroDeltaRequests()); // In this method, we have to assert the 
"GET" path and the "APPLY" path. @@ -372,8 +372,8 @@ public void testGetAndApplyDeltaUpdatesFromOMWithLimit( OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); assertEquals(1.0, - metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0); - assertEquals(3, metrics.getNumNonZeroDeltaRequests().value()); + metrics.getAverageNumUpdatesInDeltaRequest(), 0.0); + assertEquals(3, metrics.getNumNonZeroDeltaRequests()); // In this method, we have to assert the "GET" path and the "APPLY" path. @@ -417,7 +417,7 @@ public void testSyncDataFromOMFullSnapshot( reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol); OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); - assertEquals(0, metrics.getNumSnapshotRequests().value()); + assertEquals(0, metrics.getNumSnapshotRequests()); // Should trigger full snapshot request. ozoneManagerServiceProvider.syncDataFromOM(); @@ -429,7 +429,7 @@ public void testSyncDataFromOMFullSnapshot( assertEquals(OmSnapshotRequest.name(), captor.getValue().getTaskName()); verify(reconTaskControllerMock, times(1)) .reInitializeTasks(omMetadataManager); - assertEquals(1, metrics.getNumSnapshotRequests().value()); + assertEquals(1, metrics.getNumSnapshotRequests()); } @Test @@ -470,7 +470,7 @@ public void testSyncDataFromOMDeltaUpdates( verify(reconTaskControllerMock, times(1)) .consumeOMEvents(any(OMUpdateEventBatch.class), any(OMMetadataManager.class)); - assertEquals(0, metrics.getNumSnapshotRequests().value()); + assertEquals(0, metrics.getNumSnapshotRequests()); } @Test @@ -509,7 +509,7 @@ public void testSyncDataFromOMFullSnapshotForSNNFE( assertEquals(OmSnapshotRequest.name(), captor.getValue().getTaskName()); verify(reconTaskControllerMock, times(1)) .reInitializeTasks(omMetadataManager); - assertEquals(1, metrics.getNumSnapshotRequests().value()); + assertEquals(1, metrics.getNumSnapshotRequests()); } private ReconTaskController getMockTaskController() { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java index fbddd50ee4cb..f0af066c46f3 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java @@ -114,9 +114,9 @@ public void testInitNSSummaryTable() throws IOException { private void putThreeNSMetadata() throws IOException { HashMap hmap = new HashMap<>(); - hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1")); - hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2")); - hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3")); + hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1)); + hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1)); + hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1)); RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); for (Map.Entry entry: hmap.entrySet()) { reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java index eff330a796c9..a996f167a1bb 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java @@ -18,6 +18,11 @@ package org.apache.hadoop.ozone.recon.tasks; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; import static org.hadoop.ozone.recon.schema.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.BDDMockito.given; @@ -84,18 +89,21 @@ public void setUp() { @Test public void testProcess() { // mock a container with invalid used bytes - final ContainerInfo omContainerInfo0 = mock(ContainerInfo.class); + ContainerInfo omContainerInfo0 = mock(ContainerInfo.class); given(omContainerInfo0.containerID()).willReturn(new ContainerID(0)); given(omContainerInfo0.getUsedBytes()).willReturn(-1L); + given(omContainerInfo0.getState()).willReturn(OPEN); // Write 2 keys ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(CLOSED); ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(CLOSING); // mock getContainers method to return a list of containers List containers = new ArrayList<>(); @@ -105,8 +113,8 @@ public void testProcess() { task.process(containers); - // Verify 2 containers are in correct bins. - assertEquals(2, containerCountBySizeDao.count()); + // Verify 3 containers are in correct bins. + assertEquals(3, containerCountBySizeDao.count()); // container size upper bound for // 1500000000L (1.5GB) is 2147483648L = 2^31 = 2GB (next highest power of 2) @@ -124,10 +132,11 @@ public void testProcess() { containerCountBySizeDao.findById(recordToFind.value1()).getCount() .longValue()); - // Add a new key + // Add a new container ContainerInfo omContainerInfo3 = mock(ContainerInfo.class); given(omContainerInfo3.containerID()).willReturn(new ContainerID(3)); given(omContainerInfo3.getUsedBytes()).willReturn(1000000000L); // 1GB + given(omContainerInfo3.getState()).willReturn(QUASI_CLOSED); containers.add(omContainerInfo3); // Update existing key. 
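The size-bin assertions in this test round a container's used bytes up to the next power of two (1.5 GB to 2^31 = 2 GB, 2.5 GB to 2^32 = 4 GB, 1 GB to 2^30). Below is a minimal standalone sketch of that rounding, assuming only the arithmetic spelled out in the test comments: SizeBinSketch and upperBound are illustrative names, not Recon's ContainerSizeCountTask, which additionally skips DELETED and negative-sized containers (see the new test in the next hunk) and, judging by the later 50000L-to-512 MB comment, applies a minimum bin size that this sketch omits.

// Illustrative sketch of the next-power-of-two size binning the assertions rely on.
public final class SizeBinSketch {
  private SizeBinSketch() {
  }

  // Smallest power of two >= usedBytes; -1 for non-positive (invalid) sizes,
  // which the task under test skips.
  static long upperBound(long usedBytes) {
    if (usedBytes <= 0) {
      return -1L;
    }
    long highest = Long.highestOneBit(usedBytes);
    return highest == usedBytes ? usedBytes : highest << 1;
  }

  public static void main(String[] args) {
    System.out.println(upperBound(1500000000L)); // 2147483648 = 2^31 (2 GB)
    System.out.println(upperBound(2500000000L)); // 4294967296 = 2^32 (4 GB)
    System.out.println(upperBound(1000000000L)); // 1073741824 = 2^30 (1 GB)
  }
}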
@@ -137,7 +146,7 @@ public void testProcess() { task.process(containers); // Total size groups added to the database - assertEquals(4, containerCountBySizeDao.count()); + assertEquals(5, containerCountBySizeDao.count()); // Check whether container size upper bound for // 50000L is 536870912L = 2^29 = 512MB (next highest power of 2) @@ -164,4 +173,59 @@ public void testProcess() { .getCount() .longValue()); } + + @Test + public void testProcessDeletedAndNegativeSizedContainers() { + // Create a list of containers, including one that is deleted + ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); + given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); + given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(OPEN); + + ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); + given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); + given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(CLOSED); + + ContainerInfo omContainerInfoDeleted = mock(ContainerInfo.class); + given(omContainerInfoDeleted.containerID()).willReturn(new ContainerID(3)); + given(omContainerInfoDeleted.getUsedBytes()).willReturn(1000000000L); + given(omContainerInfoDeleted.getState()).willReturn(DELETED); // 1GB + + // Create a mock container with negative size + final ContainerInfo negativeSizeContainer = mock(ContainerInfo.class); + given(negativeSizeContainer.containerID()).willReturn(new ContainerID(0)); + given(negativeSizeContainer.getUsedBytes()).willReturn(-1L); + given(negativeSizeContainer.getState()).willReturn(OPEN); + + // Create a mock container with negative size and DELETE state + final ContainerInfo negativeSizeDeletedContainer = + mock(ContainerInfo.class); + given(negativeSizeDeletedContainer.containerID()).willReturn( + new ContainerID(0)); + given(negativeSizeDeletedContainer.getUsedBytes()).willReturn(-1L); + given(negativeSizeDeletedContainer.getState()).willReturn(DELETED); + + // Create a mock container with id 1 and updated size of 1GB from 1.5GB + final ContainerInfo validSizeContainer = mock(ContainerInfo.class); + given(validSizeContainer.containerID()).willReturn(new ContainerID(1)); + given(validSizeContainer.getUsedBytes()).willReturn(1000000000L); // 1GB + given(validSizeContainer.getState()).willReturn(CLOSED); + + // Mock getContainers method to return a list of containers including + // both valid and invalid ones + List containers = new ArrayList<>(); + containers.add(omContainerInfo1); + containers.add(omContainerInfo2); + containers.add(omContainerInfoDeleted); + containers.add(negativeSizeContainer); + containers.add(negativeSizeDeletedContainer); + containers.add(validSizeContainer); + + task.process(containers); + + // Verify that only the valid containers are counted + assertEquals(3, containerCountBySizeDao.count()); + } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java index 6992c3100fb9..485804240d52 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java @@ -166,7 +166,7 @@ public void setUp() throws Exception { reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID); 
assertNotNull(nsSummaryForBucket1); assertNotNull(nsSummaryForBucket2); - assertNull(nsSummaryForBucket3); + assertNotNull(nsSummaryForBucket3); } @Test @@ -233,7 +233,7 @@ public void setUp() throws IOException { assertNotNull(nsSummaryForBucket2); nsSummaryForBucket3 = reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID); - assertNull(nsSummaryForBucket3); + assertNotNull(nsSummaryForBucket3); } private OMUpdateEventBatch processEventBatch() throws IOException { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java index 66c522cb4d70..ba2e7497417e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java @@ -52,8 +52,8 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; /** * Test for NSSummaryTaskWithFSO. @@ -270,6 +270,37 @@ public void testReprocessDirsUnderDir() throws Exception { assertEquals(DIR_ONE, nsSummaryInDir1.getDirName()); assertEquals(DIR_TWO, nsSummaryInDir2.getDirName()); } + + @Test + public void testDirectoryParentIdAssignment() throws Exception { + // Trigger reprocess to simulate reading from OM DB and processing into NSSummary. + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); + + // Fetch NSSummary for DIR_ONE and verify its parent ID matches BUCKET_ONE_OBJECT_ID. + NSSummary nsSummaryDirOne = + reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID); + assertNotNull(nsSummaryDirOne, + "NSSummary for DIR_ONE should not be null."); + assertEquals(BUCKET_ONE_OBJECT_ID, nsSummaryDirOne.getParentId(), + "DIR_ONE's parent ID should match BUCKET_ONE_OBJECT_ID."); + + // Fetch NSSummary for DIR_TWO and verify its parent ID matches DIR_ONE_OBJECT_ID. + NSSummary nsSummaryDirTwo = + reconNamespaceSummaryManager.getNSSummary(DIR_TWO_OBJECT_ID); + assertNotNull(nsSummaryDirTwo, + "NSSummary for DIR_TWO should not be null."); + assertEquals(DIR_ONE_OBJECT_ID, nsSummaryDirTwo.getParentId(), + "DIR_TWO's parent ID should match DIR_ONE_OBJECT_ID."); + + // Fetch NSSummary for DIR_THREE and verify its parent ID matches DIR_ONE_OBJECT_ID. + NSSummary nsSummaryDirThree = + reconNamespaceSummaryManager.getNSSummary(DIR_THREE_OBJECT_ID); + assertNotNull(nsSummaryDirThree, + "NSSummary for DIR_THREE should not be null."); + assertEquals(DIR_ONE_OBJECT_ID, nsSummaryDirThree.getParentId(), + "DIR_THREE's parent ID should match DIR_ONE_OBJECT_ID."); + } + } /** @@ -462,6 +493,27 @@ public void testProcessDirDeleteRename() throws IOException { // after renaming dir1, check its new name assertEquals(DIR_ONE_RENAME, nsSummaryForDir1.getDirName()); } + + @Test + public void testParentIdAfterProcessEventBatch() throws IOException { + + // Verify the parent ID of DIR_FOUR after it's added under BUCKET_ONE. 
+ NSSummary nsSummaryDirFour = + reconNamespaceSummaryManager.getNSSummary(DIR_FOUR_OBJECT_ID); + assertNotNull(nsSummaryDirFour, + "NSSummary for DIR_FOUR should not be null."); + assertEquals(BUCKET_ONE_OBJECT_ID, nsSummaryDirFour.getParentId(), + "DIR_FOUR's parent ID should match BUCKET_ONE_OBJECT_ID."); + + // Verify the parent ID of DIR_FIVE after it's added under BUCKET_TWO. + NSSummary nsSummaryDirFive = + reconNamespaceSummaryManager.getNSSummary(DIR_FIVE_OBJECT_ID); + assertNotNull(nsSummaryDirFive, + "NSSummary for DIR_FIVE should not be null."); + assertEquals(BUCKET_TWO_OBJECT_ID, nsSummaryDirFive.getParentId(), + "DIR_FIVE's parent ID should match BUCKET_TWO_OBJECT_ID."); + } + } /** diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java new file mode 100644 index 000000000000..db4803676390 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java @@ -0,0 +1,554 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * Test for NSSummaryTaskWithLegacy focusing on the OBS (Object Store) layout. 
+ */ +public final class TestNSSummaryTaskWithLegacyOBSLayout { + + private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static OzoneConfiguration ozoneConfiguration; + private static NSSummaryTaskWithLegacy nSSummaryTaskWithLegacy; + + private static OMMetadataManager omMetadataManager; + private static OzoneConfiguration omConfiguration; + + // Object names + private static final String VOL = "vol"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "key1"; + private static final String KEY_TWO = "key2"; + private static final String KEY_THREE = "dir1/dir2/key3"; + private static final String KEY_FOUR = "key4///////////"; + private static final String KEY_FIVE = "//////////"; + private static final String KEY_SIX = "key6"; + private static final String KEY_SEVEN = "/////key7"; + + private static final String TEST_USER = "TestUser"; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; + + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_OLD_SIZE = 1025L; + private static final long KEY_TWO_UPDATE_SIZE = 1023L; + private static final long KEY_THREE_SIZE = + ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; + private static final long KEY_FOUR_SIZE = 2050L; + private static final long KEY_FIVE_SIZE = 100L; + private static final long KEY_SIX_SIZE = 6000L; + private static final long KEY_SEVEN_SIZE = 7000L; + + private TestNSSummaryTaskWithLegacyOBSLayout() { + } + + @BeforeAll + public static void setUp(@TempDir File tmpDir) throws Exception { + initializeNewOmMetadataManager(new File(tmpDir, "om")); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + new File(tmpDir, "recon")); + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + false); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(tmpDir) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + + NSSummary nonExistentSummary = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNull(nonExistentSummary); + + populateOMDB(); + + nSSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + } + + /** + * Nested class for testing NSSummaryTaskWithLegacy reprocess. 
+ */ + @Nested + public class TestReprocess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + @BeforeEach + public void setUp() throws IOException { + // write a NSSummary prior to reprocess + // verify it got cleaned up after. + NSSummary staleNSSummary = new NSSummary(); + RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L, + staleNSSummary); + reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); + + // Verify commit + assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + + nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket2); + } + + @Test + public void testReprocessNSSummaryNull() throws IOException { + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + } + + @Test + public void testReprocessGetFiles() { + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + + assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + nsSummaryForBucket1.getSizeOfFiles()); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + @Test + public void testReprocessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {0, 1, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Nested class for testing NSSummaryTaskWithLegacy process. + */ + @Nested + public class TestProcess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + private OMDBUpdateEvent keyEvent1; + private OMDBUpdateEvent keyEvent2; + private OMDBUpdateEvent keyEvent3; + private OMDBUpdateEvent keyEvent4; + + @BeforeEach + public void setUp() throws IOException { + // reinit Recon RocksDB's namespace CF. 
+ reconNamespaceSummaryManager.clearNSSummaryTable(); + nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + nSSummaryTaskWithLegacy.processWithLegacy(processEventBatch()); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket2); + } + + private OMUpdateEventBatch processEventBatch() throws IOException { + // Test PUT Event. + // PUT Key6 in Bucket2. + String omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_TWO + + OM_KEY_PREFIX + KEY_SIX; + OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX, + KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE); + keyEvent1 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + // PUT Key7 in Bucket1. + omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_SEVEN; + omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN, + KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE); + keyEvent2 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + // Test DELETE Event. + // Delete Key1 in Bucket1. + String omDeleteKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_ONE; + OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE, + KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE); + keyEvent3 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omDeleteKey) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setValue(omDeleteKeyInfo) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .build(); + + // Test UPDATE Event. + // Resize Key2 in Bucket1. + String omResizeKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_TWO; + OmKeyInfo oldOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE); + OmKeyInfo newOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100); + keyEvent4 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omResizeKey) + .setOldValue(oldOmResizeKeyInfo) + .setValue(newOmResizeKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) + .build(); + + return new OMUpdateEventBatch( + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + } + + @Test + public void testProcessForCount() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + + Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); + assertEquals(0, childDirBucket1.size()); + Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); + assertEquals(0, childDirBucket2.size()); + } + + @Test + public void testProcessForSize() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals( + KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, + nsSummaryForBucket1.getSizeOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + + @Test + public void testProcessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {1, 3, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2, 3}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Populate OMDB with the following configs. + * vol + * / \ + * bucket1 bucket2 + * / \ \ \ \ + * key1 key2 key3 key4 key5 + * + * @throws IOException + */ + private static void populateOMDB() throws IOException { + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_OLD_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
+ * + * @throws IOException ioEx + */ + private static void initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + omMetadataManager.getVolumeTable().put(volumeKey, args); + + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + } + + /** + * Build a key info for put/update action. + * + * @param volume volume name + * @param bucket bucket name + * @param key key name + * @param fileName file name + * @param objectID object ID + * @param parentObjectId parent object ID + * @param dataSize file size + * @return the KeyInfo + */ + private static OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + String fileName, + long objectID, + long parentObjectId, + long dataSize) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFileName(fileName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build(); + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.LEGACY; + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java new file mode 100644 index 000000000000..8f9d6b2990a5 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -0,0 +1,548 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.io.Serializable; +import java.util.Arrays; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * Unit test for NSSummaryTaskWithOBS. 
+ */ +public final class TestNSSummaryTaskWithOBS implements Serializable { + private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private static OMMetadataManager omMetadataManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static NSSummaryTaskWithOBS nSSummaryTaskWithOBS; + private static OzoneConfiguration omConfiguration; + + // Object names + private static final String VOL = "vol"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "key1"; + private static final String KEY_TWO = "key2"; + private static final String KEY_THREE = "dir1/dir2/key3"; + private static final String KEY_FOUR = "key4///////////"; + private static final String KEY_FIVE = "//////////"; + private static final String KEY_SIX = "key6"; + private static final String KEY_SEVEN = "key7"; + + private static final String TEST_USER = "TestUser"; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; + + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_OLD_SIZE = 1025L; + private static final long KEY_TWO_UPDATE_SIZE = 1023L; + private static final long KEY_THREE_SIZE = + ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; + private static final long KEY_FOUR_SIZE = 2050L; + private static final long KEY_FIVE_SIZE = 100L; + private static final long KEY_SIX_SIZE = 6000L; + private static final long KEY_SEVEN_SIZE = 7000L; + + private TestNSSummaryTaskWithOBS() { + } + + @BeforeAll + public static void setUp(@TempDir File tmpDir) throws Exception { + initializeNewOmMetadataManager(new File(tmpDir, "om")); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + new File(tmpDir, "recon")); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(tmpDir) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + + NSSummary nonExistentSummary = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNull(nonExistentSummary); + + populateOMDB(); + + nSSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, omConfiguration); + } + + /** + * Nested class for testing NSSummaryTaskWithOBS reprocess. + */ + @Nested + public class TestReprocess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + @BeforeEach + public void setUp() throws IOException { + // write a NSSummary prior to reprocess + // verify it got cleaned up after. 
+ NSSummary staleNSSummary = new NSSummary(); + RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L, + staleNSSummary); + reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); + + // Verify commit + assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + + nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket2); + } + + @Test + public void testReprocessNSSummaryNull() throws IOException { + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + } + + @Test + public void testReprocessGetFiles() { + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + + assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + nsSummaryForBucket1.getSizeOfFiles()); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + @Test + public void testReprocessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {0, 1, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Nested class for testing NSSummaryTaskWithOBS process. + */ + @Nested + public class TestProcess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + private OMDBUpdateEvent keyEvent1; + private OMDBUpdateEvent keyEvent2; + private OMDBUpdateEvent keyEvent3; + private OMDBUpdateEvent keyEvent4; + + @BeforeEach + public void setUp() throws IOException { + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + nSSummaryTaskWithOBS.processWithOBS(processEventBatch()); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket2); + } + + private OMUpdateEventBatch processEventBatch() throws IOException { + // Test PUT Event. + // PUT Key6 in Bucket2. 
+ String omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_TWO + + OM_KEY_PREFIX + KEY_SIX; + OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX, + KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE); + keyEvent1 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + // PUT Key7 in Bucket1. + omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_SEVEN; + omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN, + KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE); + keyEvent2 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + // Test DELETE Event. + // Delete Key1 in Bucket1. + String omDeleteKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_ONE; + OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE, + KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE); + keyEvent3 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omDeleteKey) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setValue(omDeleteKeyInfo) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .build(); + + // Test UPDATE Event. + // Resize Key2 in Bucket1. + String omResizeKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_TWO; + OmKeyInfo oldOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE); + OmKeyInfo newOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100); + keyEvent4 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omResizeKey) + .setOldValue(oldOmResizeKeyInfo) + .setValue(newOmResizeKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) + .build(); + + return new OMUpdateEventBatch( + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + } + + @Test + public void testProcessForCount() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + + Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); + assertEquals(0, childDirBucket1.size()); + Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); + assertEquals(0, childDirBucket2.size()); + } + + @Test + public void testProcessForSize() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals( + KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, + nsSummaryForBucket1.getSizeOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + + @Test + public void testProcessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {1, 3, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2, 3}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Populate OMDB with the following configs. + * vol + * / \ + * bucket1 bucket2 + * / \ \ \ \ + * key1 key2 key3 key4 key5 + * + * @throws IOException + */ + private static void populateOMDB() throws IOException { + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_OLD_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
+ * + * @throws IOException ioEx + */ + private static void initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + omMetadataManager.getVolumeTable().put(volumeKey, args); + + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + } + + /** + * Build a key info for put/update action. + * @param volume volume name + * @param bucket bucket name + * @param key key name + * @param fileName file name + * @param objectID object ID + * @param parentObjectId parent object ID + * @param dataSize file size + * @return the KeyInfo + */ + private static OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + String fileName, + long objectID, + long parentObjectId, + long dataSize) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFileName(fileName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build(); + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index df014f4276fa..56d8fe213152 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -21,20 +21,28 @@ import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TypedTable; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import 
org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; - +import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.jooq.DSLContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -44,18 +52,20 @@ import java.util.Arrays; import java.util.List; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; @@ -66,29 +76,83 @@ import static org.mockito.Mockito.when; /** - * Unit test for Object Count Task. + * This test class is designed for the OM Table Insight Task. It conducts tests + * for tables that require both Size and Count, as well as for those that only + * require Count. 
*/ public class TestOmTableInsightTask extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; - private GlobalStatsDao globalStatsDao; - private OmTableInsightTask omTableInsightTask; - private DSLContext dslContext; + private static GlobalStatsDao globalStatsDao; + private static OmTableInsightTask omTableInsightTask; + private static DSLContext dslContext; private boolean isSetupDone = false; - private ReconOMMetadataManager reconOMMetadataManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static NSSummaryTaskWithFSO nSSummaryTaskWithFso; + private static OzoneConfiguration ozoneConfiguration; + private static ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager; + + // Object names in FSO-enabled format + private static final String VOL = "volume1"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "file1"; + private static final String KEY_TWO = "file2"; + private static final String KEY_THREE = "dir1/dir2/file3"; + private static final String FILE_ONE = "file1"; + private static final String FILE_TWO = "file2"; + private static final String FILE_THREE = "file3"; + private static final String DIR_ONE = "dir1"; + private static final String DIR_TWO = "dir2"; + private static final String DIR_THREE = "dir3"; + + + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long DIR_ONE_OBJECT_ID = 14L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long DIR_TWO_OBJECT_ID = 17L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long DIR_THREE_OBJECT_ID = 10L; + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_SIZE = 1025L; + private static final long KEY_THREE_SIZE = 2000L; + + // mock client's path requests + private static final String TEST_USER = "TestUser"; + + @Mock + private Table nsSummaryTable; public TestOmTableInsightTask() { super(); } private void initializeInjector() throws IOException { + ozoneConfiguration = new OzoneConfiguration(); reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( temporaryFolder.resolve("JunitOmDBDir")).toFile()), Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile()); globalStatsDao = getDao(GlobalStatsDao.class); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconSqlDb() + .withReconOm(reconOMMetadataManager) + .withContainerDB() + .build(); + reconNamespaceSummaryManager = reconTestInjector.getInstance( + ReconNamespaceSummaryManagerImpl.class); + omTableInsightTask = new OmTableInsightTask( globalStatsDao, getConfiguration(), reconOMMetadataManager); + nSSummaryTaskWithFso = new NSSummaryTaskWithFSO( + reconNamespaceSummaryManager, reconOMMetadataManager, + ozoneConfiguration); dslContext = getDslContext(); } @@ -99,10 +163,182 @@ public void setUp() throws IOException { initializeInjector(); isSetupDone = true; } + MockitoAnnotations.openMocks(this); // Truncate table before running each test dslContext.truncate(GLOBAL_STATS); } + /** + * Populate OM-DB with the following structure. 
+ * volume1 + * | \ + * bucket1 bucket2 + * / \ \ + * dir1 dir2 dir3 + * / \ \ + * file1 file2 file3 + * + * @throws IOException + */ + private void populateOMDB() throws IOException { + + // Create 2 Buckets bucket1 and bucket2 + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .build(); + String bucketKey = reconOMMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .build(); + bucketKey = reconOMMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo2); + + // Create a single volume named volume1 + String volumeKey = reconOMMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + reconOMMetadataManager.getVolumeTable().put(volumeKey, args); + + // Generate keys for the File Table + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + FILE_ONE, + KEY_ONE_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + FILE_TWO, + KEY_TWO_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + FILE_THREE, + KEY_THREE_OBJECT_ID, + DIR_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // Generate Deleted Directories in OM + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_ONE, + VOL, + DIR_ONE, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + DIR_ONE_OBJECT_ID); + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_ONE, + VOL, + DIR_TWO, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + DIR_TWO_OBJECT_ID); + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_TWO, + VOL, + DIR_THREE, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + DIR_THREE_OBJECT_ID); + } + + @Test + public void testReprocessForDeletedDirectory() throws Exception { + // Create keys and deleted directories + populateOMDB(); + + // Generate NamespaceSummary for the OM DB + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); + + Pair result = + omTableInsightTask.reprocess(reconOMMetadataManager); + assertTrue(result.getRight()); + assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); + } + + @Test + public void testProcessForDeletedDirectoryTable() throws IOException { + // Prepare mock data size + Long expectedSize1 = 1000L; + Long expectedSize2 = 2000L; + NSSummary nsSummary1 = new NSSummary(); + NSSummary nsSummary2 = new NSSummary(); + nsSummary1.setSizeOfFiles(expectedSize1); + nsSummary2.setSizeOfFiles(expectedSize2); + when(nsSummaryTable.get(1L)).thenReturn(nsSummary1); + when(nsSummaryTable.get(2L)).thenReturn(nsSummary1); + when(nsSummaryTable.get(3L)).thenReturn(nsSummary2); + when(nsSummaryTable.get(4L)).thenReturn(nsSummary2); + when(nsSummaryTable.get(5L)).thenReturn(nsSummary2); + + /* DB key in 
DeletedDirectoryTable => + "/volumeId/bucketId/parentId/dirName/dirObjectId" */ + List paths = Arrays.asList( + "/18/28/22/dir1/1", + "/18/26/23/dir1/2", + "/18/20/24/dir1/3", + "/18/21/25/dir1/4", + "/18/27/26/dir1/5" + ); + + // Testing PUT events + // Create 5 OMDBUpdateEvent instances for 5 different deletedDirectory paths + ArrayList putEvents = new ArrayList<>(); + for (long i = 0L; i < 5L; i++) { + putEvents.add(getOMUpdateEvent(paths.get((int) i), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, (i + 1), false), + DELETED_DIR_TABLE, PUT, null)); + } + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); + omTableInsightTask.process(putEventBatch); + assertEquals(5, getCountForTable(DELETED_DIR_TABLE)); + + + // Testing DELETE events + // Create 2 OMDBUpdateEvent instances for 2 different deletedDirectory paths + ArrayList deleteEvents = new ArrayList<>(); + deleteEvents.add(getOMUpdateEvent(paths.get(0), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, 1L, false), DELETED_DIR_TABLE, + DELETE, null)); + deleteEvents.add(getOMUpdateEvent(paths.get(2), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, 3L, false), DELETED_DIR_TABLE, + DELETE, null)); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); + omTableInsightTask.process(deleteEventBatch); + assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); + } + @Test public void testReprocessForCount() throws Exception { OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class); @@ -110,27 +346,32 @@ public void testReprocessForCount() throws Exception { // Mock 5 rows in each table and test the count for (String tableName : omTableInsightTask.getTaskTables()) { TypedTable table = mock(TypedTable.class); - TypedTable.TypedTableIterator mockIter = mock(TypedTable - .TypedTableIterator.class); + TypedTable.TypedTableIterator mockIter = + mock(TypedTable.TypedTableIterator.class); when(table.iterator()).thenReturn(mockIter); when(omMetadataManager.getTable(tableName)).thenReturn(table); - when(mockIter.hasNext()) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(false); + when(mockIter.hasNext()).thenReturn(true, true, true, true, true, false); + TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class); - when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class)); + + if (tableName.equals(DELETED_TABLE)) { + RepeatedOmKeyInfo keyInfo = mock(RepeatedOmKeyInfo.class); + when(keyInfo.getTotalSize()).thenReturn(ImmutablePair.of(100L, 100L)); + when(keyInfo.getOmKeyInfoList()).thenReturn( + Arrays.asList(mock(OmKeyInfo.class))); + when(mockKeyValue.getValue()).thenReturn(keyInfo); + } else { + when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class)); + } + when(mockIter.next()).thenReturn(mockKeyValue); } Pair result = omTableInsightTask.reprocess(omMetadataManager); - assertTrue(result.getRight()); + assertTrue(result.getRight()); assertEquals(5L, getCountForTable(KEY_TABLE)); assertEquals(5L, getCountForTable(VOLUME_TABLE)); assertEquals(5L, getCountForTable(BUCKET_TABLE)); @@ -138,7 +379,6 @@ public void testReprocessForCount() throws Exception { assertEquals(5L, getCountForTable(DELETED_TABLE)); } - @Test public void testReprocessForOpenKeyTable() throws Exception { // Populate the OpenKeys table in OM DB @@ -203,44 +443,73 @@ public void testReprocessForDeletedTable() throws Exception { @Test public void testProcessForCount() { - ArrayList events = new ArrayList<>(); - // Create 5 put, 1 delete and 1 update 
event for each table + List initialEvents = new ArrayList<>(); + + // Creating events for each table except the deleted table for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; // Skipping deleted table as it has a separate test + } + + // Adding 5 PUT events per table for (int i = 0; i < 5; i++) { - events.add(getOMUpdateEvent("item" + i, null, tableName, PUT, null)); + initialEvents.add( + getOMUpdateEvent("item" + i, mock(OmKeyInfo.class), tableName, PUT, + null)); } - // for delete event, if value is set to null, the counter will not be - // decremented. This is because the value will be null if item does not - // exist in the database and there is no need to delete. - events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, - DELETE, null)); - events.add(getOMUpdateEvent("item1", null, tableName, UPDATE, null)); + + // Adding 1 DELETE event where value is null, indicating non-existence + // in the database. + initialEvents.add( + getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, DELETE, + null)); + // Adding 1 UPDATE event. This should not affect the count. + initialEvents.add( + getOMUpdateEvent("item1", mock(OmKeyInfo.class), tableName, UPDATE, + mock(OmKeyInfo.class))); } - OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events); - omTableInsightTask.process(omUpdateEventBatch); - // Verify 4 items in each table. (5 puts - 1 delete + 0 update) - assertEquals(4L, getCountForTable(KEY_TABLE)); - assertEquals(4L, getCountForTable(VOLUME_TABLE)); - assertEquals(4L, getCountForTable(BUCKET_TABLE)); - assertEquals(4L, getCountForTable(FILE_TABLE)); + // Processing the initial batch of events + OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents); + omTableInsightTask.process(initialBatch); - // add a new key and simulate delete on non-existing item (value: null) - ArrayList newEvents = new ArrayList<>(); + // Verifying the count in each table for (String tableName : omTableInsightTask.getTaskTables()) { - newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT, null)); - // This delete event should be a noop since value is null - newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE, null)); + if (tableName.equals(DELETED_TABLE)) { + continue; + } + assertEquals(4L, getCountForTable( + tableName)); // 4 items expected after processing (5 puts - 1 delete) } - omUpdateEventBatch = new OMUpdateEventBatch(newEvents); - omTableInsightTask.process(omUpdateEventBatch); + List additionalEvents = new ArrayList<>(); + // Simulating new PUT and DELETE events + for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; + } + // Adding 1 new PUT event + additionalEvents.add( + getOMUpdateEvent("item6", mock(OmKeyInfo.class), tableName, PUT, + null)); + // Attempting to delete a non-existing item (value: null) + additionalEvents.add( + getOMUpdateEvent("item0", null, tableName, DELETE, null)); + } - // Verify 5 items in each table. 
(1 new put + 0 delete) - assertEquals(5L, getCountForTable(KEY_TABLE)); - assertEquals(5L, getCountForTable(VOLUME_TABLE)); - assertEquals(5L, getCountForTable(BUCKET_TABLE)); - assertEquals(5L, getCountForTable(FILE_TABLE)); + // Processing the additional events + OMUpdateEventBatch additionalBatch = + new OMUpdateEventBatch(additionalEvents); + omTableInsightTask.process(additionalBatch); + // Verifying the final count in each table + for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; + } + // 5 items expected after processing the additional events. + assertEquals(5L, getCountForTable( + tableName)); + } } @Test @@ -251,35 +520,38 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { when(omKeyInfo.getDataSize()).thenReturn(sizeToBeReturned); when(omKeyInfo.getReplicatedSize()).thenReturn(sizeToBeReturned * 3); - // Test PUT events + // Test PUT events. + // Add 5 PUT events for OpenKeyTable and OpenFileTable. ArrayList putEvents = new ArrayList<>(); - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { - for (int i = 0; i < 5; i++) { - putEvents.add( - getOMUpdateEvent("item" + i, omKeyInfo, tableName, PUT, null)); - } + for (int i = 0; i < 10; i++) { + String table = (i < 5) ? OPEN_KEY_TABLE : OPEN_FILE_TABLE; + putEvents.add(getOMUpdateEvent("item" + i, omKeyInfo, table, PUT, null)); } + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); omTableInsightTask.process(putEventBatch); - // After 5 PUTs, size should be 5 * 1000 = 5000 for each size-related table - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + // After 5 PUTs, size should be 5 * 1000 = 5000 + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(5000L, getUnReplicatedSizeForTable(tableName)); assertEquals(15000L, getReplicatedSizeForTable(tableName)); } // Test DELETE events ArrayList deleteEvents = new ArrayList<>(); - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { - // Delete "item0" - deleteEvents.add( - getOMUpdateEvent("item0", omKeyInfo, tableName, DELETE, null)); - } + // Delete "item0" for OpenKeyTable and OpenFileTable. 
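+    // The DELETE event carries the removed OmKeyInfo as its value so the task can subtract that key's size from the table totals.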
+ deleteEvents.add( + getOMUpdateEvent("item0", omKeyInfo, OPEN_KEY_TABLE, DELETE, null)); + deleteEvents.add( + getOMUpdateEvent("item0", omKeyInfo, OPEN_FILE_TABLE, DELETE, null)); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); omTableInsightTask.process(deleteEventBatch); // After deleting "item0", size should be 4 * 1000 = 4000 - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(4000L, getUnReplicatedSizeForTable(tableName)); assertEquals(12000L, getReplicatedSizeForTable(tableName)); } @@ -287,7 +559,8 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { // Test UPDATE events ArrayList updateEvents = new ArrayList<>(); Long newSizeToBeReturned = 2000L; - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { // Update "item1" with a new size OmKeyInfo newKeyInfo = mock(OmKeyInfo.class); when(newKeyInfo.getDataSize()).thenReturn(newSizeToBeReturned); @@ -295,12 +568,14 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { updateEvents.add( getOMUpdateEvent("item1", newKeyInfo, tableName, UPDATE, omKeyInfo)); } + OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents); omTableInsightTask.process(updateEventBatch); // After updating "item1", size should be 4000 - 1000 + 2000 = 5000 // presentValue - oldValue + newValue = updatedValue - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(5000L, getUnReplicatedSizeForTable(tableName)); assertEquals(15000L, getReplicatedSizeForTable(tableName)); } @@ -313,9 +588,10 @@ public void testProcessForDeletedTable() { new ImmutablePair<>(1000L, 3000L); ArrayList omKeyInfoList = new ArrayList<>(); // Add 5 OmKeyInfo objects to the list - for (int i = 0; i < 5; i++) { + for (long i = 0; i < 5; i++) { OmKeyInfo omKeyInfo = - getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", true); + getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", i + 1, + true); // Set properties of OmKeyInfo object if needed omKeyInfoList.add(omKeyInfo); } @@ -353,38 +629,14 @@ public void testProcessForDeletedTable() { // After deleting "item0", size should be 4 * 1000 = 4000 assertEquals(4000L, getUnReplicatedSizeForTable(DELETED_TABLE)); assertEquals(12000L, getReplicatedSizeForTable(DELETED_TABLE)); - - - // Test UPDATE events - ArrayList updateEvents = new ArrayList<>(); - // Update "item1" with new sizes - ImmutablePair newSizesToBeReturned = - new ImmutablePair<>(500L, 1500L); - RepeatedOmKeyInfo newRepeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class); - when(newRepeatedOmKeyInfo.getTotalSize()).thenReturn(newSizesToBeReturned); - when(newRepeatedOmKeyInfo.getOmKeyInfoList()).thenReturn( - omKeyInfoList.subList(1, 5)); - OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents); - // For item1, newSize=500 and totalCount of deleted keys should be 4 - updateEvents.add( - getOMUpdateEvent("item1", newRepeatedOmKeyInfo, DELETED_TABLE, UPDATE, - repeatedOmKeyInfo)); - omTableInsightTask.process(updateEventBatch); - // Since one key has been deleted, total deleted keys should be 19 - assertEquals(19L, getCountForTable(DELETED_TABLE)); - // After updating "item1", size should be 4000 - 1000 + 500 = 3500 - // 
presentValue - oldValue + newValue = updatedValue - assertEquals(3500L, getUnReplicatedSizeForTable(DELETED_TABLE)); - assertEquals(10500L, getReplicatedSizeForTable(DELETED_TABLE)); } - private OMDBUpdateEvent getOMUpdateEvent( String name, Object value, String table, OMDBUpdateEvent.OMDBUpdateAction action, Object oldValue) { - return new OMUpdateEventBuilder() + return new OMDBUpdateEvent.OMUpdateEventBuilder() .setAction(action) .setKey(name) .setValue(value) @@ -409,7 +661,8 @@ private long getReplicatedSizeForTable(String tableName) { } private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, - String keyName, boolean isFile) { + String keyName, Long objectID, + boolean isFile) { return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -418,6 +671,7 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, .setReplicationConfig(StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setDataSize(100L) + .setObjectID(objectID) .build(); } } diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index f875047d04a2..18bbd906a0b1 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -99,6 +99,12 @@ io.grpc grpc-protobuf + + + com.google.code.findbugs + jsr305 + + io.grpc diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java index 47b59cfcc0e8..5122705ae702 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.ozone.s3.commontypes; +import org.apache.hadoop.ozone.s3.endpoint.S3Owner; + import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; +import org.apache.hadoop.ozone.OzoneConsts; + import java.time.Instant; /** @@ -33,11 +37,14 @@ public class KeyMetadata { @XmlElement(name = "Key") private EncodingTypeObject key; // or the Object Name + @XmlElement(name = "Owner") + private S3Owner owner; + @XmlJavaTypeAdapter(IsoDateAdapter.class) @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; @XmlElement(name = "Size") @@ -54,6 +61,14 @@ public void setKey(EncodingTypeObject key) { this.key = key; } + public S3Owner getOwner() { + return owner; + } + + public void setOwner(S3Owner owner) { + this.owner = owner; + } + public Instant getLastModified() { return lastModified; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index b8cd56d5f954..ec434e4bb566 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -63,12 +63,13 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.BitSet; +import java.util.EnumSet; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Set; +import static 
org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -665,14 +666,11 @@ private List getAndConvertAclOnBucket(String value, throw newError(NOT_IMPLEMENTED, part[0]); } // Build ACL on Bucket - BitSet aclsOnBucket = - S3Acl.getOzoneAclOnBucketFromS3Permission(permission); + EnumSet aclsOnBucket = S3Acl.getOzoneAclOnBucketFromS3Permission(permission); OzoneAcl defaultOzoneAcl = new OzoneAcl( - IAccessAuthorizer.ACLIdentityType.USER, part[1], aclsOnBucket, - OzoneAcl.AclScope.DEFAULT); - OzoneAcl accessOzoneAcl = new OzoneAcl( - IAccessAuthorizer.ACLIdentityType.USER, part[1], aclsOnBucket, - ACCESS); + IAccessAuthorizer.ACLIdentityType.USER, part[1], OzoneAcl.AclScope.DEFAULT, aclsOnBucket + ); + OzoneAcl accessOzoneAcl = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnBucket); ozoneAclList.add(defaultOzoneAcl); ozoneAclList.add(accessOzoneAcl); } @@ -699,11 +697,9 @@ private List getAndConvertAclOnVolume(String value, throw newError(NOT_IMPLEMENTED, part[0]); } // Build ACL on Volume - BitSet aclsOnVolume = + EnumSet aclsOnVolume = S3Acl.getOzoneAclOnVolumeFromS3Permission(permission); - OzoneAcl accessOzoneAcl = new OzoneAcl( - IAccessAuthorizer.ACLIdentityType.USER, part[1], aclsOnVolume, - ACCESS); + OzoneAcl accessOzoneAcl = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnVolume); ozoneAclList.add(accessOzoneAcl); } return ozoneAclList; @@ -714,7 +710,10 @@ private void addKey(ListObjectResponse response, OzoneKey next) { keyMetadata.setKey(EncodingTypeObject.createNullable(next.getName(), response.getEncodingType())); keyMetadata.setSize(next.getDataSize()); - keyMetadata.setETag("" + next.getModificationTime()); + String eTag = next.getMetadata().get(ETAG); + if (eTag != null) { + keyMetadata.setETag(ObjectEndpoint.wrapInQuotes(eTag)); + } if (next.getReplicationType().toString().equals(ReplicationType .STAND_ALONE.toString())) { keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString()); @@ -722,6 +721,10 @@ private void addKey(ListObjectResponse response, OzoneKey next) { keyMetadata.setStorageClass(S3StorageType.STANDARD.toString()); } keyMetadata.setLastModified(next.getModificationTime()); + String ownerName = next.getOwner(); + String displayName = ownerName; + // Use ownerName to fill displayName + keyMetadata.setOwner(new S3Owner(ownerName, displayName)); response.addKey(keyMetadata); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java index 72289470c2ca..af5eafc9f438 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java @@ -23,6 +23,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; + import java.util.ArrayList; import java.util.List; @@ -55,7 +57,7 @@ public static class Part { @XmlElement(name = "PartNumber") private int partNumber; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) 
private String eTag; public int getPartNumber() { @@ -66,12 +68,12 @@ public void setPartNumber(int partNumber) { this.partNumber = partNumber; } - public String geteTag() { + public String getETag() { return eTag; } - public void seteTag(String eTag) { - this.eTag = eTag; + public void setETag(String eTagHash) { + this.eTag = eTagHash; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java index 17f7f575a6ca..cdaaa228ecd7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java @@ -44,14 +44,13 @@ public class CompleteMultipartUploadRequestUnmarshaller implements MessageBodyReader { private final JAXBContext context; - private final XMLReader xmlReader; + private final SAXParserFactory saxParserFactory; public CompleteMultipartUploadRequestUnmarshaller() { try { context = JAXBContext.newInstance(CompleteMultipartUploadRequest.class); - SAXParserFactory saxParserFactory = SAXParserFactory.newInstance(); + saxParserFactory = SAXParserFactory.newInstance(); saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - xmlReader = saxParserFactory.newSAXParser().getXMLReader(); } catch (Exception ex) { throw new AssertionError("Can not instantiate " + "CompleteMultipartUploadRequest parser", ex); @@ -70,6 +69,7 @@ public CompleteMultipartUploadRequest readFrom( MultivaluedMap multivaluedMap, InputStream inputStream) throws IOException, WebApplicationException { try { + XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); UnmarshallerHandler unmarshallerHandler = context.createUnmarshaller().getUnmarshallerHandler(); XmlNamespaceFilter filter = diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java index c636f36b175b..2aa30d6b839b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java @@ -22,6 +22,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; /** * Complete Multipart Upload request response. 
@@ -41,7 +42,7 @@ public class CompleteMultipartUploadResponse { @XmlElement(name = "Key") private String key; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public String getLocation() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java index 6e114c2e0c64..d1136fe9ed78 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -39,7 +40,7 @@ public class CopyObjectResponse { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java index c4e65aa38ff7..ab30c1f0e7c9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java @@ -25,6 +25,7 @@ import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; import java.time.Instant; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; /** @@ -39,7 +40,7 @@ public class CopyPartResult { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public CopyPartResult() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 5694d6f9f41b..4ffc30119359 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -19,6 +19,7 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; +import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; import javax.ws.rs.container.ContainerRequestContext; @@ -61,21 +62,28 @@ import org.apache.hadoop.ozone.s3.signature.SignatureInfo; import org.apache.hadoop.ozone.s3.util.AuditUtils; import org.apache.hadoop.util.Time; +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URLEncodedUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.KB; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_TAG; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT; +import 
static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_NUM_LIMIT; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_REGEX_PATTERN; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_VALUE_LENGTH_LIMIT; /** * Basic helpers for all the REST endpoints. */ public abstract class EndpointBase implements Auditor { - protected static final String ETAG = "ETag"; - protected static final String ETAG_CUSTOM = "etag-custom"; @Inject @@ -309,6 +317,20 @@ protected Map getCustomMetadataFromHeaders( customMetadata.put(mapKey, value); } } + + // If the request contains a custom metadata header "x-amz-meta-ETag", + // replace the metadata key to "etag-custom" to prevent key metadata collision with + // the ETag calculated by hashing the object when storing the key in OM table. + // The custom ETag metadata header will be rebuilt during the headObject operation. + if (customMetadata.containsKey(HttpHeaders.ETAG) + || customMetadata.containsKey(HttpHeaders.ETAG.toLowerCase())) { + String customETag = customMetadata.get(HttpHeaders.ETAG) != null ? + customMetadata.get(HttpHeaders.ETAG) : customMetadata.get(HttpHeaders.ETAG.toLowerCase()); + customMetadata.remove(HttpHeaders.ETAG); + customMetadata.remove(HttpHeaders.ETAG.toLowerCase()); + customMetadata.put(ETAG_CUSTOM, customETag); + } + return customMetadata; } @@ -322,6 +344,7 @@ protected void addCustomMetadataHeaders( } String metadataKey = entry.getKey(); if (metadataKey.equals(ETAG_CUSTOM)) { + // Rebuild the ETag custom metadata header metadataKey = ETAG.toLowerCase(); } responseBuilder @@ -330,6 +353,82 @@ protected void addCustomMetadataHeaders( } } + protected Map getTaggingFromHeaders(HttpHeaders httpHeaders) + throws OS3Exception { + String tagString = httpHeaders.getHeaderString(TAG_HEADER); + + if (StringUtils.isEmpty(tagString)) { + return Collections.emptyMap(); + } + + List tagPairs = URLEncodedUtils.parse(tagString, UTF_8); + + if (tagPairs.isEmpty()) { + return Collections.emptyMap(); + } + + Map tags = new HashMap<>(); + // Tag restrictions: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_S3Tag.html + for (NameValuePair tagPair: tagPairs) { + if (StringUtils.isEmpty(tagPair.getName())) { + OS3Exception ex = newError(INVALID_TAG, TAG_HEADER); + ex.setErrorMessage("Some tag keys are empty, please specify the non-empty tag keys"); + throw ex; + } + + if (tagPair.getValue() == null) { + // For example for query parameter with only value (e.g. 
"tag1") + OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + ex.setErrorMessage("Some tag values are not specified, please specify the tag values"); + throw ex; + } + + if (tags.containsKey(tagPair.getName())) { + // Tags that are associated with an object must have unique tag keys + // Reject request if the same key is used twice on the same resource + OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + ex.setErrorMessage("There are tags with duplicate tag keys, tag keys should be unique"); + throw ex; + } + + if (tagPair.getName().length() > TAG_KEY_LENGTH_LIMIT) { + OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + ex.setErrorMessage("The tag key exceeds the maximum length of " + TAG_KEY_LENGTH_LIMIT); + throw ex; + } + + if (tagPair.getValue().length() > TAG_VALUE_LENGTH_LIMIT) { + OS3Exception ex = newError(INVALID_TAG, tagPair.getValue()); + ex.setErrorMessage("The tag value exceeds the maximum length of " + TAG_VALUE_LENGTH_LIMIT); + throw ex; + } + + if (!TAG_REGEX_PATTERN.matcher(tagPair.getName()).matches()) { + OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + ex.setErrorMessage("The tag key does not have a valid pattern"); + throw ex; + } + + if (!TAG_REGEX_PATTERN.matcher(tagPair.getValue()).matches()) { + OS3Exception ex = newError(INVALID_TAG, tagPair.getValue()); + ex.setErrorMessage("The tag value does not have a valid pattern"); + throw ex; + } + + tags.put(tagPair.getName(), tagPair.getValue()); + } + + if (tags.size() > TAG_NUM_LIMIT) { + // You can associate up to 10 tags with an object. + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, TAG_HEADER); + ex.setErrorMessage("The number of tags " + tags.size() + + " exceeded the maximum number of tags of " + TAG_NUM_LIMIT); + throw ex; + } + + return tags; + } + private AuditMessage.Builder auditMessageBaseBuilder(AuditAction op, Map auditMap) { AuditMessage.Builder builder = new AuditMessage.Builder() diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java index fc9da14133c8..8f3fad735441 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -154,7 +155,7 @@ public static class Part { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java index f5745a8fc102..0c34c08091aa 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java @@ -43,14 +43,13 @@ public class MultiDeleteRequestUnmarshaller implements MessageBodyReader { private final JAXBContext context; - private final XMLReader xmlReader; + private final SAXParserFactory saxParserFactory; public 
MultiDeleteRequestUnmarshaller() { try { context = JAXBContext.newInstance(MultiDeleteRequest.class); - SAXParserFactory saxParserFactory = SAXParserFactory.newInstance(); + saxParserFactory = SAXParserFactory.newInstance(); saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - xmlReader = saxParserFactory.newSAXParser().getXMLReader(); } catch (Exception ex) { throw new AssertionError("Can't instantiate MultiDeleteRequest parser", ex); @@ -68,6 +67,7 @@ public MultiDeleteRequest readFrom(Class type, Type genericType, Annotation[] annotations, MediaType mediaType, MultivaluedMap httpHeaders, InputStream entityStream) { try { + XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); UnmarshallerHandler unmarshallerHandler = context.createUnmarshaller().getUnmarshallerHandler(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 1e247c8eb858..b3332efbe2c1 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -58,8 +58,6 @@ import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.KeyMetadataAware; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -101,12 +99,13 @@ import java.util.OptionalLong; import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH; +import static javax.ws.rs.core.HttpHeaders.ETAG; import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; @@ -125,6 +124,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.PRECOND_FAILED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; @@ -134,6 +134,9 @@ import static 
org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CopyDirective; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_COUNT_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_DIRECTIVE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Utils.urlDecode; /** @@ -150,7 +153,7 @@ public class ObjectEndpoint extends EndpointBase { static { E_TAG_PROVIDER = ThreadLocal.withInitial(() -> { try { - return MessageDigest.getInstance("Md5"); + return MessageDigest.getInstance(OzoneConsts.MD5_HASH); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } @@ -195,8 +198,8 @@ public void init() { OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES); datastreamEnabled = ozoneConfiguration.getBoolean( - DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); + HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); datastreamMinLength = (long) ozoneConfiguration.getStorageSize( OZONE_FS_DATASTREAM_AUTO_THRESHOLD, OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES); @@ -216,13 +219,14 @@ public Response put( @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, - InputStream body) throws IOException, OS3Exception { + final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; boolean auditSuccess = true; PerformanceStringBuilder perf = new PerformanceStringBuilder(); String copyHeader = null, storageType = null; + DigestInputStream digestInputStream = null; try { OzoneVolume volume = getVolume(); if (uploadID != null && !uploadID.equals("")) { @@ -272,7 +276,9 @@ public Response put( boolean hasAmzDecodedLengthZero = amzDecodedLength != null && Long.parseLong(amzDecodedLength) == 0; if (canCreateDirectory && - (length == 0 || hasAmzDecodedLengthZero)) { + (length == 0 || hasAmzDecodedLengthZero) && + StringUtils.endsWith(keyPath, "/") + ) { s3GAction = S3GAction.CREATE_DIRECTORY; getClientProtocol() .createDirectory(volume.getName(), bucketName, keyPath); @@ -285,43 +291,37 @@ public Response put( // Normal put object Map customMetadata = getCustomMetadataFromHeaders(headers.getRequestHeaders()); - if (customMetadata.containsKey(ETAG) - || customMetadata.containsKey(ETAG.toLowerCase())) { - String customETag = customMetadata.get(ETAG) != null ? 
- customMetadata.get(ETAG) : customMetadata.get(ETAG.toLowerCase()); - customMetadata.remove(ETAG); - customMetadata.remove(ETAG.toLowerCase()); - customMetadata.put(ETAG_CUSTOM, customETag); - } if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new DigestInputStream(new SignedChunksInputStream(body), - E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(new SignedChunksInputStream(body), + getMessageDigestInstance()); length = Long.parseLong(amzDecodedLength); } else { - body = new DigestInputStream(body, E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(body, getMessageDigestInstance()); } + Map tags = getTaggingFromHeaders(headers); + long putLength; String eTag = null; if (datastreamEnabled && !enableEC && length > datastreamMinLength) { perf.appendStreamMode(); Pair keyWriteResult = ObjectEndpointStreaming .put(bucket, keyPath, length, replicationConfig, chunkSize, - customMetadata, (DigestInputStream) body, perf); + customMetadata, digestInputStream, perf); eTag = keyWriteResult.getKey(); putLength = keyWriteResult.getValue(); } else { try (OzoneOutputStream output = getClientProtocol().createKey( volume.getName(), bucketName, keyPath, length, replicationConfig, - customMetadata)) { + customMetadata, tags)) { long metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); - putLength = IOUtils.copyLarge(body, output); + putLength = IOUtils.copyLarge(digestInputStream, output); eTag = DatatypeConverter.printHexBinary( - ((DigestInputStream) body).getMessageDigest().digest()) + digestInputStream.getMessageDigest().digest()) .toLowerCase(); output.getMetadata().put(ETAG, eTag); } @@ -366,6 +366,11 @@ public Response put( } throw ex; } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (digestInputStream != null) { + digestInputStream.getMessageDigest().reset(); + } if (auditSuccess) { long opLatencyNs = getMetrics().updateCreateKeySuccessStats(startNanos); perf.appendOpLatencyNanos(opLatencyNs); @@ -482,9 +487,12 @@ public Response get( responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal); } responseBuilder - .header(ETAG, wrapInQuotes(keyDetails.getMetadata().get(ETAG))) .header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT); + if (keyDetails.getMetadata().get(ETAG) != null) { + responseBuilder.header(ETAG, wrapInQuotes(keyDetails.getMetadata().get(ETAG))); + } + // if multiple query parameters having same name, // Only the first parameters will be recognized // eg: @@ -509,6 +517,7 @@ public Response get( } } addLastModifiedDate(responseBuilder, keyDetails); + addTagCountIfAny(responseBuilder, keyDetails); long metadataLatencyNs = getMetrics().updateGetKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); @@ -550,6 +559,17 @@ static void addLastModifiedDate( RFC1123Util.FORMAT.format(lastModificationTime)); } + static void addTagCountIfAny( + ResponseBuilder responseBuilder, OzoneKey key) { + // See x-amz-tagging-count in https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + // The number of tags, IF ANY, on the object, when you have the relevant + // permission to read object tags + if (!key.getTags().isEmpty()) { + responseBuilder + .header(TAG_COUNT_HEADER, key.getTags().size()); + } + } + /** * Rest endpoint to check existence of an object in a bucket. *
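
The put() hunks above wrap the request body in a java.security.DigestInputStream backed by the thread-local MD5 MessageDigest, so the ETag is accumulated while the bytes stream into the key, and the digest is reset in the finally block in case an exception prevents digest() from ever being called. A minimal, self-contained sketch of that pattern, using plain JDK streams in place of the Ozone client calls (the class and helper names here are illustrative, not part of the patch):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import javax.xml.bind.DatatypeConverter;

    public final class ETagDigestSketch {
      // One MessageDigest per thread, mirroring the E_TAG_PROVIDER thread-local above.
      private static final ThreadLocal<MessageDigest> MD5 = ThreadLocal.withInitial(() -> {
        try {
          return MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException e) {
          throw new IllegalStateException(e);
        }
      });

      /** Copies the body to a sink while accumulating its MD5, returns the lowercase hex ETag. */
      static String writeAndComputeETag(InputStream body, ByteArrayOutputStream sink) throws IOException {
        DigestInputStream digestIn = new DigestInputStream(body, MD5.get());
        try {
          byte[] buf = new byte[8192];
          int n;
          while ((n = digestIn.read(buf)) != -1) {
            sink.write(buf, 0, n);
          }
          return DatatypeConverter.printHexBinary(digestIn.getMessageDigest().digest()).toLowerCase();
        } finally {
          // If an exception prevented digest() from running, clear any partial state
          // so the next request on this thread starts from a clean digest.
          digestIn.getMessageDigest().reset();
        }
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        String eTag = writeAndComputeETag(
            new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8)), sink);
        System.out.println(eTag); // 5d41402abc4b2a76b9719d911017c592
      }
    }

The reset in finally is cheap insurance: MessageDigest.digest() already resets the instance on the success path, so the extra reset only matters when the copy failed partway through.
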

    @@ -590,9 +610,16 @@ public Response head( } ResponseBuilder response = Response.ok().status(HttpStatus.SC_OK) - .header(ETAG, "" + wrapInQuotes(key.getMetadata().get(ETAG))) .header("Content-Length", key.getDataSize()) .header("Content-Type", "binary/octet-stream"); + + if (key.getMetadata().get(ETAG) != null) { + // Should not return ETag header if the ETag is not set + // doing so will result in "null" string being returned instead + // which breaks some AWS SDK implementation + response.header(ETAG, wrapInQuotes(key.getMetadata().get(ETAG))); + } + addLastModifiedDate(response, key); addCustomMetadataHeaders(response, key); getMetrics().updateHeadKeySuccessStats(startNanos); @@ -735,11 +762,16 @@ public Response initializeMultipartUpload( OzoneBucket ozoneBucket = getBucket(bucket); String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER); + Map customMetadata = + getCustomMetadataFromHeaders(headers.getRequestHeaders()); + + Map tags = getTaggingFromHeaders(headers); + ReplicationConfig replicationConfig = getReplicationConfig(ozoneBucket, storageType); OmMultipartInfo multipartInfo = - ozoneBucket.initiateMultipartUpload(key, replicationConfig); + ozoneBucket.initiateMultipartUpload(key, replicationConfig, customMetadata, tags); MultipartUploadInitiateResponse multipartUploadInitiateResponse = new MultipartUploadInitiateResponse(); @@ -771,7 +803,8 @@ public Response initializeMultipartUpload( private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, String storageType) throws OS3Exception { if (StringUtils.isEmpty(storageType)) { - storageType = S3StorageType.getDefault(ozoneConfiguration).toString(); + S3StorageType defaultStorageType = S3StorageType.getDefault(ozoneConfiguration); + storageType = (defaultStorageType != null ? 
defaultStorageType.toString() : null); } ReplicationConfig clientConfiguredReplicationConfig = null; @@ -807,7 +840,7 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; try { for (CompleteMultipartUploadRequest.Part part : partList) { - partsMap.put(part.getPartNumber(), part.geteTag()); + partsMap.put(part.getPartNumber(), part.getETag()); } if (LOG.isDebugEnabled()) { LOG.debug("Parts map {}", partsMap); @@ -867,20 +900,21 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) private Response createMultipartKey(OzoneVolume volume, String bucket, String key, long length, int partNumber, String uploadID, - InputStream body, PerformanceStringBuilder perf) + final InputStream body, PerformanceStringBuilder perf) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); String copyHeader = null; + DigestInputStream digestInputStream = null; try { if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new DigestInputStream(new SignedChunksInputStream(body), - E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(new SignedChunksInputStream(body), + getMessageDigestInstance()); length = Long.parseLong( headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)); } else { - body = new DigestInputStream(body, E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(body, getMessageDigestInstance()); } copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); @@ -900,13 +934,11 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, perf.appendStreamMode(); return ObjectEndpointStreaming .createMultipartKey(ozoneBucket, key, length, partNumber, - uploadID, chunkSize, (DigestInputStream) body, perf); + uploadID, chunkSize, digestInputStream, perf); } // OmMultipartCommitUploadPartInfo can only be gotten after the - // OzoneOutputStream is closed, so we need to save the KeyOutputStream - // in the OzoneOutputStream and use it to get the - // OmMultipartCommitUploadPartInfo after OzoneOutputStream is closed. 
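
The multipart hunks around this point stop digging the KeyOutputStream out of the OzoneOutputStream and instead read the OmMultipartCommitUploadPartInfo from the closed stream itself, reporting the client-visible ETag and falling back to the MPU part name when an older OM does not return one (see the createMultipartKey and listParts changes just below). A small sketch of that fallback, with a hypothetical CommitInfo holder standing in for OmMultipartCommitUploadPartInfo:

    import org.apache.commons.lang3.StringUtils;

    public final class PartETagFallbackSketch {

      /** Hypothetical stand-in for OmMultipartCommitUploadPartInfo / PartInfo. */
      static final class CommitInfo {
        private final String partName;
        private final String eTag;   // may be null or empty when the OM is older

        CommitInfo(String partName, String eTag) {
          this.partName = partName;
          this.eTag = eTag;
        }

        String getPartName() { return partName; }
        String getETag()     { return eTag; }
      }

      /** Prefer the ETag; fall back to the MPU part name for compatibility with old OMs. */
      static String partIdentifier(CommitInfo info) {
        return StringUtils.isNotEmpty(info.getETag()) ? info.getETag() : info.getPartName();
      }

      public static void main(String[] args) {
        System.out.println(partIdentifier(
            new CommitInfo("/vol/bucket/key-part-1", "9e107d9d372bb6826bd81d3542a419d6")));
        System.out.println(partIdentifier(
            new CommitInfo("/vol/bucket/key-part-1", null)));
      }
    }
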
- KeyOutputStream keyOutputStream = null; + // OzoneOutputStream is closed, so we need to save the OzoneOutputStream + final OzoneOutputStream outputStream; long metadataLatencyNs; if (copyHeader != null) { Pair result = parseSourceHeader(copyHeader); @@ -955,7 +987,9 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge( sourceObject, ozoneOutputStream, 0, length); - keyOutputStream = ozoneOutputStream.getKeyOutputStream(); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); + outputStream = ozoneOutputStream; } } else { try (OzoneOutputStream ozoneOutputStream = getClientProtocol() @@ -964,7 +998,9 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream); - keyOutputStream = ozoneOutputStream.getKeyOutputStream(); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); + outputStream = ozoneOutputStream; } } getMetrics().incCopyObjectSuccessLength(copyLength); @@ -977,23 +1013,26 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, partNumber, uploadID)) { metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); - putLength = IOUtils.copyLarge(body, ozoneOutputStream); - ((KeyMetadataAware)ozoneOutputStream.getOutputStream()) - .getMetadata().put(ETAG, DatatypeConverter.printHexBinary( - ((DigestInputStream) body).getMessageDigest().digest()) - .toLowerCase()); - keyOutputStream - = ozoneOutputStream.getKeyOutputStream(); + putLength = IOUtils.copyLarge(digestInputStream, ozoneOutputStream); + byte[] digest = digestInputStream.getMessageDigest().digest(); + ozoneOutputStream.getMetadata() + .put(ETAG, DatatypeConverter.printHexBinary(digest).toLowerCase()); + outputStream = ozoneOutputStream; } getMetrics().incPutKeySuccessLength(putLength); perf.appendSizeBytes(putLength); } perf.appendMetaLatencyNanos(metadataLatencyNs); - assert keyOutputStream != null; OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = - keyOutputStream.getCommitUploadPartInfo(); - String eTag = omMultipartCommitUploadPartInfo.getPartName(); + outputStream.getCommitUploadPartInfo(); + String eTag = omMultipartCommitUploadPartInfo.getETag(); + // If the OmMultipartCommitUploadPartInfo does not contain eTag, + // fall back to MPU part name for compatibility in case the (old) OM + // does not return the eTag field + if (StringUtils.isEmpty(eTag)) { + eTag = omMultipartCommitUploadPartInfo.getPartName(); + } if (copyHeader != null) { getMetrics().updateCopyObjectSuccessStats(startNanos); @@ -1020,6 +1059,12 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, throw os3Exception; } throw ex; + } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (digestInputStream != null) { + digestInputStream.getMessageDigest().reset(); + } } } @@ -1064,7 +1109,10 @@ private Response listParts(String bucket, String key, String uploadID, ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { ListPartsResponse.Part part = new ListPartsResponse.Part(); part.setPartNumber(partInfo.getPartNumber()); - part.setETag(partInfo.getPartName()); + // If the ETag field does not exist, use MPU part name for backward + // compatibility + 
part.setETag(StringUtils.isNotEmpty(partInfo.getETag()) ? + partInfo.getETag() : partInfo.getPartName()); part.setSize(partInfo.getSize()); part.setLastModified(Instant.ofEpochMilli( partInfo.getModificationTime())); @@ -1097,11 +1145,12 @@ public void setContext(ContainerRequestContext context) { } @SuppressWarnings("checkstyle:ParameterNumber") - void copy(OzoneVolume volume, InputStream src, long srcKeyLen, + void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, String destKey, String destBucket, ReplicationConfig replication, Map metadata, - PerformanceStringBuilder perf, long startNanos) + PerformanceStringBuilder perf, long startNanos, + Map tags) throws IOException { long copyLength; if (datastreamEnabled && !(replication != null && @@ -1110,15 +1159,17 @@ void copy(OzoneVolume volume, InputStream src, long srcKeyLen, perf.appendStreamMode(); copyLength = ObjectEndpointStreaming .copyKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen, - chunkSize, replication, metadata, src, perf, startNanos); + chunkSize, replication, metadata, src, perf, startNanos, tags); } else { try (OzoneOutputStream dest = getClientProtocol() .createKey(volume.getName(), destBucket, destKey, srcKeyLen, - replication, metadata)) { + replication, metadata, tags)) { long metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); copyLength = IOUtils.copyLarge(src, dest); + String eTag = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase(); + dest.getMetadata().put(ETAG, eTag); } } getMetrics().incCopyObjectSuccessLength(copyLength); @@ -1136,9 +1187,11 @@ private CopyObjectResponse copyObject(OzoneVolume volume, String sourceBucket = result.getLeft(); String sourceKey = result.getRight(); + DigestInputStream sourceDigestInputStream = null; try { + OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( + volume.getName(), sourceBucket, sourceKey); // Checking whether we trying to copying to it self. 
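
The copyObject() hunks just below add x-amz-tagging-directive and x-amz-metadata-directive handling: an absent or COPY directive keeps the source key's values, REPLACE takes them from the request headers, and anything else is rejected as InvalidArgument. A simplified sketch of that resolution using a local enum mirroring the CopyDirective added to S3Consts (the real code raises an OS3Exception rather than IllegalArgumentException):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.commons.lang3.StringUtils;

    public final class CopyDirectiveSketch {

      /** Mirrors the CopyDirective enum the patch adds to S3Consts. */
      enum CopyDirective { COPY, REPLACE }

      /**
       * Resolves a directive header: missing/empty or "COPY" keeps the source
       * values, "REPLACE" takes the values from the request, anything else fails.
       */
      static Map<String, String> resolve(String directiveHeader,
                                         Map<String, String> fromSource,
                                         Map<String, String> fromRequest) {
        if (StringUtils.isEmpty(directiveHeader)
            || directiveHeader.equals(CopyDirective.COPY.name())) {
          return fromSource;
        }
        if (directiveHeader.equals(CopyDirective.REPLACE.name())) {
          return fromRequest;
        }
        // The real endpoint raises an S3 InvalidArgument error here.
        throw new IllegalArgumentException("Invalid directive: " + directiveHeader
            + " (valid values are COPY or REPLACE)");
      }

      public static void main(String[] args) {
        Map<String, String> source = new HashMap<>();
        source.put("project", "ozone");
        Map<String, String> request = new HashMap<>();
        request.put("project", "replacement");

        System.out.println(resolve(null, source, request));      // {project=ozone}
        System.out.println(resolve("REPLACE", source, request)); // {project=replacement}
      }
    }
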
- if (sourceBucket.equals(destBucket) && sourceKey .equals(destkey)) { // When copying to same storage type when storage type is provided, @@ -1157,22 +1210,54 @@ private CopyObjectResponse copyObject(OzoneVolume volume, // still does not support this just returning dummy response // for now CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); + copyObjectResponse.setETag(wrapInQuotes(sourceKeyDetails.getMetadata().get(ETAG))); copyObjectResponse.setLastModified(Instant.ofEpochMilli( Time.now())); return copyObjectResponse; } } - - OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( - volume.getName(), sourceBucket, sourceKey); long sourceKeyLen = sourceKeyDetails.getDataSize(); + // Object tagging in copyObject with tagging directive + Map tags; + String tagCopyDirective = headers.getHeaderString(TAG_DIRECTIVE_HEADER); + if (StringUtils.isEmpty(tagCopyDirective) || tagCopyDirective.equals(CopyDirective.COPY.name())) { + // Tag-set will be copied from the source directly + tags = sourceKeyDetails.getTags(); + } else if (tagCopyDirective.equals(CopyDirective.REPLACE.name())) { + // Replace the tags with the tags from the request headers + tags = getTaggingFromHeaders(headers); + } else { + OS3Exception ex = newError(INVALID_ARGUMENT, tagCopyDirective); + ex.setErrorMessage("An error occurred (InvalidArgument) " + + "when calling the CopyObject operation: " + + "The tagging copy directive specified is invalid. Valid values are COPY or REPLACE."); + throw ex; + } + + // Custom metadata in copyObject with metadata directive + Map customMetadata; + String metadataCopyDirective = headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER); + if (StringUtils.isEmpty(metadataCopyDirective) || metadataCopyDirective.equals(CopyDirective.COPY.name())) { + // The custom metadata will be copied from the source key + customMetadata = sourceKeyDetails.getMetadata(); + } else if (metadataCopyDirective.equals(CopyDirective.REPLACE.name())) { + // Replace the metadata with the metadata form the request headers + customMetadata = getCustomMetadataFromHeaders(headers.getRequestHeaders()); + } else { + OS3Exception ex = newError(INVALID_ARGUMENT, metadataCopyDirective); + ex.setErrorMessage("An error occurred (InvalidArgument) " + + "when calling the CopyObject operation: " + + "The metadata copy directive specified is invalid. 
Valid values are COPY or REPLACE."); + throw ex; + } + try (OzoneInputStream src = getClientProtocol().getKey(volume.getName(), sourceBucket, sourceKey)) { getMetrics().updateCopyKeyMetadataStats(startNanos); - copy(volume, src, sourceKeyLen, destkey, destBucket, replicationConfig, - sourceKeyDetails.getMetadata(), perf, startNanos); + sourceDigestInputStream = new DigestInputStream(src, getMessageDigestInstance()); + copy(volume, sourceDigestInputStream, sourceKeyLen, destkey, destBucket, replicationConfig, + customMetadata, perf, startNanos, tags); } final OzoneKeyDetails destKeyDetails = getClientProtocol().getKeyDetails( @@ -1180,7 +1265,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, getMetrics().updateCopyObjectSuccessStats(startNanos); CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); + copyObjectResponse.setETag(wrapInQuotes(destKeyDetails.getMetadata().get(ETAG))); copyObjectResponse.setLastModified(destKeyDetails.getModificationTime()); return copyObjectResponse; } catch (OMException ex) { @@ -1193,6 +1278,12 @@ private CopyObjectResponse copyObject(OzoneVolume volume, destBucket + "/" + destkey, ex); } throw ex; + } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (sourceDigestInputStream != null) { + sourceDigestInputStream.getMessageDigest().reset(); + } } } @@ -1289,8 +1380,13 @@ public boolean isDatastreamEnabled() { return datastreamEnabled; } - private String wrapInQuotes(String value) { + static String wrapInQuotes(String value) { return "\"" + value + "\""; } + @VisibleForTesting + public MessageDigest getMessageDigestInstance() { + return E_TAG_PROVIDER.get(); + } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index e509acb05bdb..cb9499aa20d8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -21,12 +21,11 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; @@ -110,7 +109,7 @@ public static Pair putKeyWithStream( eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) .toLowerCase(); perf.appendMetaLatencyNanos(metadataLatencyNs); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return Pair.of(eTag, writeLen); } @@ -123,15 +122,19 @@ public static long copyKeyWithStream( int bufferSize, ReplicationConfig replicationConfig, Map keyMetadata, - InputStream body, 
PerformanceStringBuilder perf, long startNanos) + DigestInputStream body, PerformanceStringBuilder perf, long startNanos, + Map tags) throws IOException { - long writeLen = 0; + long writeLen; try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath, - length, replicationConfig, keyMetadata)) { + length, replicationConfig, keyMetadata, tags)) { long metadataLatencyNs = METRICS.updateCopyKeyMetadataStats(startNanos); - perf.appendMetaLatencyNanos(metadataLatencyNs); writeLen = writeToStreamOutput(streamOutput, body, bufferSize, length); + String eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) + .toLowerCase(); + perf.appendMetaLatencyNanos(metadataLatencyNs); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return writeLen; } @@ -161,11 +164,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); String eTag; - // OmMultipartCommitUploadPartInfo can only be gotten after the - // OzoneDataStreamOutput is closed, so we need to save the - // KeyDataStreamOutput in the OzoneDataStreamOutput and use it to get the - // OmMultipartCommitUploadPartInfo after OzoneDataStreamOutput is closed. - KeyDataStreamOutput keyDataStreamOutput = null; try { try (OzoneDataStreamOutput streamOutput = ozoneBucket .createMultipartStreamKey(key, length, partNumber, uploadID)) { @@ -174,11 +172,10 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, writeToStreamOutput(streamOutput, body, chunkSize, length); eTag = DatatypeConverter.printHexBinary( body.getMessageDigest().digest()).toLowerCase(); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); METRICS.incPutKeySuccessLength(putLength); perf.appendMetaLatencyNanos(metadataLatencyNs); perf.appendSizeBytes(putLength); - keyDataStreamOutput = streamOutput.getKeyDataStreamOutput(); } } catch (OMException ex) { if (ex.getResult() == @@ -190,13 +187,7 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, ozoneBucket.getName() + "/" + key); } throw ex; - } finally { - if (keyDataStreamOutput != null) { - OmMultipartCommitUploadPartInfo commitUploadPartInfo = - keyDataStreamOutput.getCommitUploadPartInfo(); - eTag = commitUploadPartInfo.getPartName(); - } } - return Response.ok().header("ETag", eTag).build(); + return Response.ok().header(OzoneConsts.ETAG, eTag).build(); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java index 3ca2e47c469e..3fa6149815ea 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java @@ -44,14 +44,13 @@ public class PutBucketAclRequestUnmarshaller implements MessageBodyReader { private final JAXBContext context; - private final XMLReader xmlReader; + private final SAXParserFactory saxParserFactory; public PutBucketAclRequestUnmarshaller() { try { context = JAXBContext.newInstance(S3BucketAcl.class); - SAXParserFactory saxParserFactory = SAXParserFactory.newInstance(); + saxParserFactory = SAXParserFactory.newInstance(); 
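
Both unmarshaller fixes in this patch — MultiDeleteRequestUnmarshaller near the start of this section and PutBucketAclRequestUnmarshaller here — keep only the JAXBContext and the SAXParserFactory as fields and create a fresh XMLReader inside readFrom(), so concurrent requests never share SAX parser state (the new concurrentParse test further down exercises exactly this). A stripped-down sketch of the pattern; the generic wrapper class is illustrative and omits the XmlNamespaceFilter used by the real unmarshallers:

    import java.io.InputStream;
    import javax.xml.XMLConstants;
    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.UnmarshallerHandler;
    import javax.xml.parsers.SAXParserFactory;
    import org.xml.sax.InputSource;
    import org.xml.sax.XMLReader;

    /** Shared JAXBContext and factory, per-call XMLReader. */
    public final class ThreadSafeXmlParserSketch<T> {

      private final JAXBContext context;
      private final SAXParserFactory saxParserFactory;
      private final Class<T> type;

      public ThreadSafeXmlParserSketch(Class<T> type) {
        this.type = type;
        try {
          context = JAXBContext.newInstance(type);
          saxParserFactory = SAXParserFactory.newInstance();
          saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        } catch (Exception ex) {
          throw new AssertionError("Can't instantiate parser for " + type, ex);
        }
      }

      public T parse(InputStream body) throws Exception {
        // A new XMLReader per call: SAX readers keep parse state and must not be
        // shared between concurrent requests.
        XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader();
        UnmarshallerHandler handler = context.createUnmarshaller().getUnmarshallerHandler();
        xmlReader.setContentHandler(handler);
        xmlReader.parse(new InputSource(body));
        return type.cast(handler.getResult());
      }
    }

A caller would build one instance per payload type, for example new ThreadSafeXmlParserSketch<>(S3BucketAcl.class), and reuse it across requests.
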
saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - xmlReader = saxParserFactory.newSAXParser().getXMLReader(); } catch (Exception ex) { throw new AssertionError("Can not instantiate " + "PutBucketAclRequest parser", ex); @@ -70,6 +69,7 @@ public S3BucketAcl readFrom( MultivaluedMap multivaluedMap, InputStream inputStream) throws IOException, WebApplicationException { try { + XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); UnmarshallerHandler unmarshallerHandler = context.createUnmarshaller().getUnmarshallerHandler(); XmlNamespaceFilter filter = diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java index 792f2e2ef5e9..5d6057f061b4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java @@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; -import java.util.BitSet; +import java.util.EnumSet; import java.util.List; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT; @@ -228,15 +228,15 @@ public static List s3AclToOzoneNativeAclOnBucket( grant.getGrantee().getXsiType()); if (identityType != null && identityType.isSupported()) { String permission = grant.getPermission(); - BitSet acls = getOzoneAclOnBucketFromS3Permission(permission); + EnumSet acls = getOzoneAclOnBucketFromS3Permission(permission); OzoneAcl defaultOzoneAcl = new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - grant.getGrantee().getId(), acls, - OzoneAcl.AclScope.DEFAULT); + grant.getGrantee().getId(), OzoneAcl.AclScope.DEFAULT, acls + ); OzoneAcl accessOzoneAcl = new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - grant.getGrantee().getId(), acls, - OzoneAcl.AclScope.ACCESS); + grant.getGrantee().getId(), OzoneAcl.AclScope.ACCESS, acls + ); ozoneAclList.add(defaultOzoneAcl); ozoneAclList.add(accessOzoneAcl); } else { @@ -249,31 +249,31 @@ public static List s3AclToOzoneNativeAclOnBucket( return ozoneAclList; } - public static BitSet getOzoneAclOnBucketFromS3Permission(String permission) + public static EnumSet getOzoneAclOnBucketFromS3Permission(String permission) throws OS3Exception { ACLType permissionType = ACLType.getType(permission); if (permissionType == null) { throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, permission); } - BitSet acls = new BitSet(IAccessAuthorizer.ACLType.getNoOfAcls()); + EnumSet acls = EnumSet.noneOf(IAccessAuthorizer.ACLType.class); switch (permissionType) { case FULL_CONTROL: - acls.set(IAccessAuthorizer.ACLType.ALL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.ALL); break; case WRITE_ACP: - acls.set(IAccessAuthorizer.ACLType.WRITE_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.WRITE_ACL); break; case READ_ACP: - acls.set(IAccessAuthorizer.ACLType.READ_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ_ACL); break; case WRITE: - acls.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - acls.set(IAccessAuthorizer.ACLType.DELETE.ordinal()); - acls.set(IAccessAuthorizer.ACLType.CREATE.ordinal()); + acls.add(IAccessAuthorizer.ACLType.WRITE); + acls.add(IAccessAuthorizer.ACLType.DELETE); + acls.add(IAccessAuthorizer.ACLType.CREATE); break; case READ: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.LIST.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + 
acls.add(IAccessAuthorizer.ACLType.LIST); break; default: LOG.error("Failed to recognize S3 permission {}", permission); @@ -292,11 +292,11 @@ public static List s3AclToOzoneNativeAclOnVolume( grant.getGrantee().getXsiType()); if (identityType != null && identityType.isSupported()) { String permission = grant.getPermission(); - BitSet acls = getOzoneAclOnVolumeFromS3Permission(permission); + EnumSet acls = getOzoneAclOnVolumeFromS3Permission(permission); OzoneAcl accessOzoneAcl = new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - grant.getGrantee().getId(), acls, - OzoneAcl.AclScope.ACCESS); + grant.getGrantee().getId(), OzoneAcl.AclScope.ACCESS, acls + ); ozoneAclList.add(accessOzoneAcl); } else { LOG.error("Grantee type {} is not supported", @@ -309,35 +309,35 @@ public static List s3AclToOzoneNativeAclOnVolume( } // User privilege on volume follows the "lest privilege" principle. - public static BitSet getOzoneAclOnVolumeFromS3Permission(String permission) + public static EnumSet getOzoneAclOnVolumeFromS3Permission(String permission) throws OS3Exception { - BitSet acls = new BitSet(IAccessAuthorizer.ACLType.getNoOfAcls()); + EnumSet acls = EnumSet.noneOf(IAccessAuthorizer.ACLType.class); ACLType permissionType = ACLType.getType(permission); if (permissionType == null) { throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, permission); } switch (permissionType) { case FULL_CONTROL: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - acls.set(IAccessAuthorizer.ACLType.READ_ACL.ordinal()); - acls.set(IAccessAuthorizer.ACLType.WRITE_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.WRITE); + acls.add(IAccessAuthorizer.ACLType.READ_ACL); + acls.add(IAccessAuthorizer.ACLType.WRITE_ACL); break; case WRITE_ACP: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.READ_ACL.ordinal()); - acls.set(IAccessAuthorizer.ACLType.WRITE_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.READ_ACL); + acls.add(IAccessAuthorizer.ACLType.WRITE_ACL); break; case READ_ACP: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.READ_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.READ_ACL); break; case WRITE: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.WRITE); break; case READ: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); break; default: LOG.error("Failed to recognize S3 permission {}", permission); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index 763c2d6be5fa..42c044086b87 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -139,6 +139,12 @@ private S3ErrorTable() { "BucketAlreadyExists", "The requested bucket name is not available" + " as it already exists.", HTTP_CONFLICT); + public static final OS3Exception INVALID_TAG = new OS3Exception( + "InvalidTag", "Your request contains tag input that is not valid.", HTTP_BAD_REQUEST); + 
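
The S3Acl hunks around this point replace the ordinal-indexed BitSet ACL bitmaps with EnumSet<IAccessAuthorizer.ACLType>, which keeps the same set semantics while being type-safe and self-describing. A simplified sketch of the bucket-permission mapping, with a local enum standing in for IAccessAuthorizer.ACLType:

    import java.util.EnumSet;

    public final class S3PermissionMappingSketch {

      /** Local stand-in for IAccessAuthorizer.ACLType. */
      enum AclType { ALL, READ, WRITE, DELETE, CREATE, LIST, READ_ACL, WRITE_ACL }

      /** Maps an S3 bucket permission string to the Ozone ACL set, as in the patched S3Acl. */
      static EnumSet<AclType> bucketAclsFor(String s3Permission) {
        switch (s3Permission) {
          case "FULL_CONTROL":
            return EnumSet.of(AclType.ALL);
          case "WRITE_ACP":
            return EnumSet.of(AclType.WRITE_ACL);
          case "READ_ACP":
            return EnumSet.of(AclType.READ_ACL);
          case "WRITE":
            return EnumSet.of(AclType.WRITE, AclType.DELETE, AclType.CREATE);
          case "READ":
            return EnumSet.of(AclType.READ, AclType.LIST);
          default:
            throw new IllegalArgumentException("Unrecognized S3 permission: " + s3Permission);
        }
      }

      public static void main(String[] args) {
        System.out.println(bucketAclsFor("WRITE")); // [WRITE, DELETE, CREATE]
      }
    }
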
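
The new InvalidTag and NoSuchTagSet errors pair with the tag limits and TAG_REGEX_PATTERN added to S3Consts just below (at most 10 tags, 128-character keys, 256-character values). The validation code itself is not part of this excerpt, so the following is only a hedged sketch of how an x-amz-tagging header might be checked against those limits; the parsing helper and its error handling are assumptions, not the endpoint's actual code:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.regex.Pattern;

    public final class TagHeaderValidationSketch {

      // Limits and pattern copied from the S3Consts additions below.
      private static final int TAG_NUM_LIMIT = 10;
      private static final int TAG_KEY_LENGTH_LIMIT = 128;
      private static final int TAG_VALUE_LENGTH_LIMIT = 256;
      private static final Pattern TAG_REGEX_PATTERN =
          Pattern.compile("^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$");

      /** Parses "k1=v1&k2=v2" from x-amz-tagging and enforces the S3 limits. */
      static Map<String, String> parseTagging(String header) {
        Map<String, String> tags = new LinkedHashMap<>();
        if (header == null || header.isEmpty()) {
          return tags;
        }
        for (String pair : header.split("&")) {
          String[] kv = pair.split("=", 2);
          String key = kv[0];
          String value = kv.length > 1 ? kv[1] : "";
          if (key.length() > TAG_KEY_LENGTH_LIMIT
              || value.length() > TAG_VALUE_LENGTH_LIMIT
              || !TAG_REGEX_PATTERN.matcher(key).matches()
              || !TAG_REGEX_PATTERN.matcher(value).matches()) {
            throw new IllegalArgumentException("Invalid tag: " + pair); // InvalidTag in the real endpoint
          }
          tags.put(key, value);
        }
        if (tags.size() > TAG_NUM_LIMIT) {
          throw new IllegalArgumentException("Too many tags: " + tags.size()); // also InvalidTag
        }
        return tags;
      }

      public static void main(String[] args) {
        System.out.println(parseTagging("project=ozone&tier=hot")); // {project=ozone, tier=hot}
      }
    }
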
+ public static final OS3Exception NO_SUCH_TAG_SET = new OS3Exception( + "NoSuchTagSet", "The specified tag does not exist.", HTTP_NOT_FOUND); + public static OS3Exception newError(OS3Exception e, String resource) { return newError(e, resource, null); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index df3d01936b18..3a29bac2268d 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -62,10 +62,32 @@ private S3Consts() { public static final String S3_XML_NAMESPACE = "http://s3.amazonaws" + ".com/doc/2006-03-01/"; + // Constants related to custom metadata public static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-"; + public static final String CUSTOM_METADATA_COPY_DIRECTIVE_HEADER = "x-amz-metadata-directive"; public static final String DECODED_CONTENT_LENGTH_HEADER = "x-amz-decoded-content-length"; + // Constants related to S3 tags + public static final String TAG_HEADER = "x-amz-tagging"; + public static final String TAG_DIRECTIVE_HEADER = "x-amz-tagging-directive"; + public static final String TAG_COUNT_HEADER = "x-amz-tagging-count"; + + public static final int TAG_NUM_LIMIT = 10; + public static final int TAG_KEY_LENGTH_LIMIT = 128; + public static final int TAG_VALUE_LENGTH_LIMIT = 256; + // See https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_S3Tag.html + // Also see https://docs.aws.amazon.com/directoryservice/latest/devguide/API_Tag.html for Java regex equivalent + public static final Pattern TAG_REGEX_PATTERN = Pattern.compile("^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"); + + /** + * Copy directive for metadata and tags. 
+ */ + public enum CopyDirective { + COPY, // Default directive + REPLACE + } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java index ae42e812fb3e..9eb88989a32e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java @@ -62,6 +62,10 @@ public ReplicationType getType() { public static S3StorageType getDefault(ConfigurationSource config) { String replicationString = config.get(OzoneConfigKeys.OZONE_REPLICATION); ReplicationFactor configFactor; + if (replicationString == null) { + // if no config is set then let server take decision + return null; + } try { configFactor = ReplicationFactor.valueOf( Integer.parseInt(replicationString)); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 7515d991eba0..c675a9ba6aa5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -56,6 +56,7 @@ import java.io.IOException; import java.net.URI; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -226,6 +227,16 @@ public OzoneOutputStream createKey(String volumeName, String bucketName, .createKey(keyName, size, replicationConfig, metadata); } + @Override + public OzoneOutputStream createKey(String volumeName, String bucketName, + String keyName, long size, + ReplicationConfig replicationConfig, + Map metadata, + Map tags) throws IOException { + return getBucket(volumeName, bucketName) + .createKey(keyName, size, replicationConfig, metadata, tags); + } + @Override public OzoneInputStream getKey(String volumeName, String bucketName, String keyName) throws IOException { @@ -309,8 +320,24 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, public OmMultipartInfo initiateMultipartUpload(String volumeName, String bucketName, String keyName, ReplicationConfig replicationConfig) throws IOException { + return initiateMultipartUpload(volumeName, bucketName, keyName, replicationConfig, Collections.emptyMap()); + } + + @Override + public OmMultipartInfo initiateMultipartUpload(String volumeName, + String bucketName, String keyName, ReplicationConfig replicationConfig, + Map metadata) + throws IOException { return getBucket(volumeName, bucketName) - .initiateMultipartUpload(keyName, replicationConfig); + .initiateMultipartUpload(keyName, replicationConfig, metadata); + } + + @Override + public OmMultipartInfo initiateMultipartUpload(String volumeName, + String bucketName, String keyName, ReplicationConfig replicationConfig, + Map metadata, Map tags) throws IOException { + return getBucket(volumeName, bucketName) + .initiateMultipartUpload(keyName, replicationConfig, metadata, tags); } @Override @@ -482,7 +509,7 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, @Override public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { - + getBucket(volumeName, bucketName).createDirectory(keyName); } @Override @@ -628,6 +655,14 @@ public OzoneDataStreamOutput createStreamKey( return null; } + @Override + public OzoneDataStreamOutput createStreamKey( + 
String volumeName, String bucketName, String keyName, long size, + ReplicationConfig replicationConfig, Map metadata, + Map tags) throws IOException { + return null; + } + @Override public OzoneDataStreamOutput createMultipartStreamKey( String volumeName, String bucketName, String keyName, long size, @@ -650,6 +685,13 @@ public String createSnapshot(String volumeName, return ""; } + @Override + public void renameSnapshot(String volumeName, String bucketName, + String snapshotOldName, String snapshotNewName) + throws IOException { + + } + @Override public List listSnapshot( String volumeName, String bucketName, String snapshotPrefix, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java index b79e49f834cb..e9fb15e613fe 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java @@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.client; import java.io.IOException; -import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -62,7 +61,7 @@ public void createVolume(String volumeName) throws IOException { .setAdmin("root") .setOwner("root") .setQuotaInBytes(Integer.MAX_VALUE) - .setAcls(new ArrayList<>()).build()); + .build()); } @Override diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index fad3386c61c4..22b002945eb9 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -23,7 +23,10 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -32,13 +35,13 @@ import java.util.UUID; import java.util.stream.Collectors; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.ozone.OzoneAcl; @@ -51,20 +54,27 @@ import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; /** * In-memory ozone bucket for 
testing. */ -public class OzoneBucketStub extends OzoneBucket { +public final class OzoneBucketStub extends OzoneBucket { + + private static final Logger LOG = LoggerFactory.getLogger(OzoneBucketStub.class); private Map keyDetails = new HashMap<>(); private Map keyContents = new HashMap<>(); - private Map multipartUploadIdMap = new HashMap<>(); + private Map keyToMultipartUpload = new HashMap<>(); private Map> partList = new HashMap<>(); @@ -75,7 +85,7 @@ public static Builder newBuilder() { return new Builder(); } - public OzoneBucketStub(Builder b) { + private OzoneBucketStub(Builder b) { super(b); this.replicationConfig = super.getReplicationConfig(); } @@ -88,43 +98,6 @@ public static final class Builder extends OzoneBucket.Builder { private Builder() { } - @Override - public Builder setVolumeName(String volumeName) { - super.setVolumeName(volumeName); - return this; - } - - @Override - public Builder setName(String name) { - super.setName(name); - return this; - } - - @Override - public Builder setDefaultReplicationConfig( - DefaultReplicationConfig defaultReplicationConfig) { - super.setDefaultReplicationConfig(defaultReplicationConfig); - return this; - } - - @Override - public Builder setStorageType(StorageType storageType) { - super.setStorageType(storageType); - return this; - } - - @Override - public Builder setVersioning(Boolean versioning) { - super.setVersioning(versioning); - return this; - } - - @Override - public Builder setCreationTime(long creationTime) { - super.setCreationTime(creationTime); - return this; - } - @Override public OzoneBucketStub build() { return new OzoneBucketStub(this); @@ -144,31 +117,17 @@ public OzoneOutputStream createKey(String key, long size, ReplicationFactor factor, Map metadata) throws IOException { - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream((int) size) { - @Override - public void close() throws IOException { - keyContents.put(key, toByteArray()); - keyDetails.put(key, new OzoneKeyDetails( - getVolumeName(), - getName(), - key, - size, - System.currentTimeMillis(), - System.currentTimeMillis(), - new ArrayList<>(), replicationConfig, metadata, null, - () -> readKey(key), true - )); - super.close(); - } - }; - return new OzoneOutputStream(byteArrayOutputStream, null); + ReplicationConfig replication = ReplicationConfig.fromTypeAndFactor(type, factor); + return createKey(key, size, replication, metadata); } @Override public OzoneOutputStream createKey(String key, long size, - ReplicationConfig rConfig, Map metadata) + ReplicationConfig rConfig, Map metadata, + Map tags) throws IOException { + assertDoesNotExist(key + "/"); + final ReplicationConfig repConfig; if (rConfig == null) { repConfig = getReplicationConfig(); @@ -189,7 +148,9 @@ public void close() throws IOException { System.currentTimeMillis(), System.currentTimeMillis(), new ArrayList<>(), finalReplicationCon, metadata, null, - () -> readKey(key), true + () -> readKey(key), true, + UserGroupInformation.getCurrentUser().getShortUserName(), + tags )); super.close(); } @@ -201,10 +162,13 @@ public void close() throws IOException { @Override public OzoneDataStreamOutput createStreamKey(String key, long size, ReplicationConfig rConfig, - Map keyMetadata) + Map keyMetadata, + Map tags) throws IOException { + assertDoesNotExist(key + "/"); + ByteBufferStreamOutput byteBufferStreamOutput = - new ByteBufferStreamOutput() { + new KeyMetadataAwareByteBufferStreamOutput(keyMetadata) { private final ByteBuffer buffer = ByteBuffer.allocate((int) size); @@ -226,7 
+190,9 @@ public void close() throws IOException { System.currentTimeMillis(), System.currentTimeMillis(), new ArrayList<>(), rConfig, objectMetadata, null, - null, false + null, false, + UserGroupInformation.getCurrentUser().getShortUserName(), + tags )); } @@ -252,8 +218,8 @@ public OzoneDataStreamOutput createMultipartStreamKey(String key, int partNumber, String uploadID) throws IOException { - String multipartUploadID = multipartUploadIdMap.get(key); - if (multipartUploadID == null || !multipartUploadID.equals(uploadID)) { + MultipartInfoStub multipartInfo = keyToMultipartUpload.get(key); + if (multipartInfo == null || !multipartInfo.getUploadId().equals(uploadID)) { throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } else { ByteBufferStreamOutput byteBufferStreamOutput = @@ -267,7 +233,8 @@ public void close() throws IOException { byte[] bytes = new byte[position]; buffer.get(bytes); - Part part = new Part(key + size, bytes); + Part part = new Part(key + size, bytes, + getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -316,7 +283,10 @@ public OzoneKey headObject(String key) throws IOException { ozoneKeyDetails.getCreationTime().toEpochMilli(), ozoneKeyDetails.getModificationTime().toEpochMilli(), ozoneKeyDetails.getReplicationConfig(), - ozoneKeyDetails.isFile()); + ozoneKeyDetails.getMetadata(), + ozoneKeyDetails.isFile(), + ozoneKeyDetails.getOwner(), + ozoneKeyDetails.getTags()); } else { throw new OMException(ResultCodes.KEY_NOT_FOUND); } @@ -371,7 +341,7 @@ public Iterator listKeys(String keyPrefix, key.getDataSize(), key.getCreationTime().getEpochSecond() * 1000, key.getModificationTime().getEpochSecond() * 1000, - key.getReplicationConfig(), key.isFile()); + key.getReplicationConfig(), key.isFile(), key.getOwner()); }).collect(Collectors.toList()); if (prevKey != null) { @@ -399,16 +369,22 @@ public OmMultipartInfo initiateMultipartUpload(String keyName, ReplicationType type, ReplicationFactor factor) throws IOException { - String uploadID = UUID.randomUUID().toString(); - multipartUploadIdMap.put(keyName, uploadID); - return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID); + return initiateMultipartUpload(keyName, ReplicationConfig.fromTypeAndFactor(type, factor), + Collections.emptyMap()); } @Override public OmMultipartInfo initiateMultipartUpload(String keyName, ReplicationConfig repConfig) throws IOException { + return initiateMultipartUpload(keyName, repConfig, Collections.emptyMap()); + } + + @Override + public OmMultipartInfo initiateMultipartUpload(String keyName, + ReplicationConfig config, Map metadata, Map tags) + throws IOException { String uploadID = UUID.randomUUID().toString(); - multipartUploadIdMap.put(keyName, uploadID); + keyToMultipartUpload.put(keyName, new MultipartInfoStub(uploadID, metadata, tags)); return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID); } @@ -416,8 +392,8 @@ public OmMultipartInfo initiateMultipartUpload(String keyName, public OzoneOutputStream createMultipartKey(String key, long size, int partNumber, String uploadID) throws IOException { - String multipartUploadID = multipartUploadIdMap.get(key); - if (multipartUploadID == null || !multipartUploadID.equals(uploadID)) { + MultipartInfoStub multipartInfo = keyToMultipartUpload.get(key); + if (multipartInfo == null || !multipartInfo.getUploadId().equals(uploadID)) { throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } else { ByteArrayOutputStream 
byteArrayOutputStream = @@ -425,7 +401,7 @@ public OzoneOutputStream createMultipartKey(String key, long size, @Override public void close() throws IOException { Part part = new Part(key + size, - toByteArray()); + toByteArray(), getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -443,13 +419,11 @@ public void close() throws IOException { public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, String uploadID, Map partsMap) throws IOException { - if (multipartUploadIdMap.get(key) == null) { + if (keyToMultipartUpload.get(key) == null) { throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } else { final Map partsList = partList.get(key); - int count = 1; - ByteArrayOutputStream output = new ByteArrayOutputStream(); int prevPartNumber = 0; @@ -463,13 +437,27 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, for (Map.Entry part: partsMap.entrySet()) { Part recordedPart = partsList.get(part.getKey()); if (recordedPart == null || - !recordedPart.getPartName().equals(part.getValue())) { + !recordedPart.getETag().equals(part.getValue())) { throw new OMException(ResultCodes.INVALID_PART); } else { output.write(recordedPart.getContent()); } keyContents.put(key, output.toByteArray()); } + + keyDetails.put(key, new OzoneKeyDetails( + getVolumeName(), + getName(), + key, + keyContents.get(key) != null ? keyContents.get(key).length : 0, + System.currentTimeMillis(), + System.currentTimeMillis(), + new ArrayList<>(), getReplicationConfig(), + keyToMultipartUpload.get(key).getMetadata(), null, + () -> readKey(key), true, + UserGroupInformation.getCurrentUser().getShortUserName(), + keyToMultipartUpload.get(key).getTags() + )); } return new OmMultipartUploadCompleteInfo(getVolumeName(), getName(), key, @@ -479,17 +467,17 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, @Override public void abortMultipartUpload(String keyName, String uploadID) throws IOException { - if (multipartUploadIdMap.get(keyName) == null) { + if (keyToMultipartUpload.get(keyName) == null) { throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } else { - multipartUploadIdMap.remove(keyName); + keyToMultipartUpload.remove(keyName); } } @Override public OzoneMultipartUploadPartListParts listParts(String key, String uploadID, int partNumberMarker, int maxParts) throws IOException { - if (multipartUploadIdMap.get(key) == null) { + if (keyToMultipartUpload.get(key) == null) { throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } List partInfoList = new ArrayList<>(); @@ -506,13 +494,21 @@ public OzoneMultipartUploadPartListParts listParts(String key, int count = 0; int nextPartNumberMarker = 0; boolean truncated = false; + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance(MD5_HASH); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } while (count < maxParts && partIterator.hasNext()) { Map.Entry partEntry = partIterator.next(); nextPartNumberMarker = partEntry.getKey(); if (partEntry.getKey() > partNumberMarker) { PartInfo partInfo = new PartInfo(partEntry.getKey(), partEntry.getValue().getPartName(), - Time.now(), partEntry.getValue().getContent().length); + Time.now(), partEntry.getValue().getContent().length, + DatatypeConverter.printHexBinary(eTagProvider.digest(partEntry + .getValue().getContent())).toLowerCase()); partInfoList.add(partInfo); count++; } @@ -563,9 +559,12 @@ public static class Part 
{ private String partName; private byte[] content; - public Part(String name, byte[] data) { + private String eTag; + + public Part(String name, byte[] data, String eTag) { this.partName = name; this.content = data.clone(); + this.eTag = eTag; } public String getPartName() { @@ -575,6 +574,11 @@ public String getPartName() { public byte[] getContent() { return content.clone(); } + + public String getETag() { + return eTag; + } + } @Override @@ -589,6 +593,9 @@ public ReplicationConfig getReplicationConfig() { @Override public void createDirectory(String keyName) throws IOException { + assertDoesNotExist(StringUtils.stripEnd(keyName, "/")); + + LOG.info("createDirectory({})", keyName); keyDetails.put(keyName, new OzoneKeyDetails( getVolumeName(), getName(), @@ -597,7 +604,15 @@ public void createDirectory(String keyName) throws IOException { System.currentTimeMillis(), System.currentTimeMillis(), new ArrayList<>(), replicationConfig, new HashMap<>(), null, - () -> readKey(keyName), false)); + () -> readKey(keyName), false, + UserGroupInformation.getCurrentUser().getShortUserName(), + Collections.emptyMap())); + } + + private void assertDoesNotExist(String keyName) throws OMException { + if (keyDetails.get(keyName) != null) { + throw new OMException("already exists", ResultCodes.FILE_ALREADY_EXISTS); + } } /** @@ -658,4 +673,33 @@ public Map getMetadata() { } } + /** + * Multipart upload stub to store MPU related information. + */ + private static class MultipartInfoStub { + + private final String uploadId; + private final Map metadata; + private final Map tags; + + MultipartInfoStub(String uploadId, Map metadata, + Map tags) { + this.uploadId = uploadId; + this.metadata = metadata; + this.tags = tags; + } + + public String getUploadId() { + return uploadId; + } + + public Map getMetadata() { + return metadata; + } + + public Map getTags() { + return tags; + } + } + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java index 7bb35682d8da..b472320b7fe7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java @@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.client; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -65,6 +66,7 @@ public synchronized void close() throws IOException { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? 
new OmMultipartCommitUploadPartInfo(partName, + getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java index 983516002909..5f1eaae28165 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java @@ -20,11 +20,8 @@ package org.apache.hadoop.ozone.client; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -74,26 +71,9 @@ public synchronized void close() throws IOException { } } - @Override - public KeyOutputStream getKeyOutputStream() { - OzoneConfiguration conf = new OzoneConfiguration(); - ReplicationConfig replicationConfig = - ReplicationConfig.getDefault(conf); - OzoneClientConfig ozoneClientConfig = conf.getObject(OzoneClientConfig.class); - StreamBufferArgs streamBufferArgs = - StreamBufferArgs.getDefaultStreamBufferArgs(replicationConfig, ozoneClientConfig); - return new KeyOutputStream(replicationConfig, null, ozoneClientConfig, streamBufferArgs) { - @Override - public synchronized OmMultipartCommitUploadPartInfo - getCommitUploadPartInfo() { - return OzoneOutputStreamStub.this.getCommitUploadPartInfo(); - } - }; - } - @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? new OmMultipartCommitUploadPartInfo(partName, + ((KeyMetadataAware)getOutputStream()).getMetadata().get(OzoneConsts.ETAG)) : null; } - } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java index 9fab5a181b56..4ce18b41f1cf 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java @@ -38,17 +38,17 @@ /** * Ozone volume with in-memory state for testing. 
*/ -public class OzoneVolumeStub extends OzoneVolume { +public final class OzoneVolumeStub extends OzoneVolume { - private Map buckets = new HashMap<>(); + private final Map buckets = new HashMap<>(); - private ArrayList aclList = new ArrayList<>(); + private final ArrayList aclList = new ArrayList<>(); public static Builder newBuilder() { return new Builder(); } - public OzoneVolumeStub(Builder b) { + private OzoneVolumeStub(Builder b) { super(b); } @@ -124,6 +124,7 @@ public void createBucket(String bucketName, BucketArgs bucketArgs) { .setDefaultReplicationConfig(new DefaultReplicationConfig( RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.THREE))) + .setBucketLayout(bucketArgs.getBucketLayout()) .setStorageType(bucketArgs.getStorageType()) .setVersioning(bucketArgs.getVersioning()) .setCreationTime(Time.now()) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java index 9a36a40601b7..638ac73ebdc0 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java @@ -26,8 +26,9 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject; import org.apache.hadoop.ozone.s3.exception.OS3Exception; - import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.security.UserGroupInformation; + import org.junit.jupiter.api.Test; import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE; @@ -118,6 +119,37 @@ public void listSubDir() throws OS3Exception, IOException { } + @Test + public void listObjectOwner() throws OS3Exception, IOException { + + UserGroupInformation user1 = UserGroupInformation + .createUserForTesting("user1", new String[] {"user1"}); + UserGroupInformation user2 = UserGroupInformation + .createUserForTesting("user2", new String[] {"user2"}); + + BucketEndpoint getBucket = new BucketEndpoint(); + OzoneClient client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket("b1"); + OzoneBucket bucket = client.getObjectStore().getS3Bucket("b1"); + + UserGroupInformation.setLoginUser(user1); + bucket.createKey("key1", 0).close(); + UserGroupInformation.setLoginUser(user2); + bucket.createKey("key2", 0).close(); + + getBucket.setClient(client); + ListObjectResponse getBucketResponse = + (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, + "key", null, null, null, null, null).getEntity(); + + assertEquals(2, getBucketResponse.getContents().size()); + assertEquals(user1.getShortUserName(), + getBucketResponse.getContents().get(0).getOwner().getDisplayName()); + assertEquals(user2.getShortUserName(), + getBucketResponse.getContents().get(1).getOwner().getDisplayName()); + + } + @Test public void listWithPrefixAndDelimiter() throws OS3Exception, IOException { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java index ab87f9c98e11..1872c440da31 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java @@ -20,12 +20,16 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.UUID; +import java.util.concurrent.CompletableFuture; + import org.junit.jupiter.api.Test; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * Class tests Unmarshall logic of {@link CompleteMultipartUploadRequest}. @@ -75,13 +79,14 @@ public void fromStreamWithoutNamespace() throws IOException { } private void checkContent(CompleteMultipartUploadRequest request) { + assertNotNull(request); assertEquals(2, request.getPartList().size()); List parts = request.getPartList(); - assertEquals(part1, parts.get(0).geteTag()); - assertEquals(part2, parts.get(1).geteTag()); + assertEquals(part1, parts.get(0).getETag()); + assertEquals(part2, parts.get(1).getETag()); } private CompleteMultipartUploadRequest unmarshall( @@ -89,4 +94,43 @@ private CompleteMultipartUploadRequest unmarshall( return new CompleteMultipartUploadRequestUnmarshaller() .readFrom(null, null, null, null, null, inputBody); } + + @Test + public void concurrentParse() { + CompleteMultipartUploadRequestUnmarshaller unmarshaller = + new CompleteMultipartUploadRequestUnmarshaller(); + byte[] bytes = ("" + "" + part1 + + "1" + + part2 + "2" + + "").getBytes( + UTF_8); + + List> futures = + new ArrayList<>(); + for (int i = 0; i < 40; i++) { + futures.add(CompletableFuture.supplyAsync(() -> { + try { + //GIVEN + ByteArrayInputStream inputBody = new ByteArrayInputStream(bytes); + //WHEN + return unmarshall(unmarshaller, inputBody); + } catch (IOException e) { + return null; + } + })); + } + + for (CompletableFuture future : futures) { + CompleteMultipartUploadRequest request = future.join(); + //THEN + checkContent(request); + } + } + + private CompleteMultipartUploadRequest unmarshall( + CompleteMultipartUploadRequestUnmarshaller unmarshaller, + ByteArrayInputStream inputBody) throws IOException { + return unmarshaller + .readFrom(null, null, null, null, null, inputBody); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 3e8beb2c3a1e..677367e6d812 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -79,17 +79,17 @@ public static void setUp() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 2, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 3, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java 
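The concurrentParse test above exercises thread safety by submitting the same unmarshalling work from many threads via CompletableFuture and joining all results. The generic shape of that pattern, shown with a trivial stand-in task rather than the real unmarshaller:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    // Illustrative pattern: run one routine from many threads and fail loudly if any
    // invocation returns a wrong result, mirroring the structure of concurrentParse.
    public final class ConcurrentParsePattern {
      public static void main(String[] args) {
        List<CompletableFuture<Integer>> futures = new ArrayList<>();
        for (int i = 0; i < 40; i++) {
          futures.add(CompletableFuture.supplyAsync(() -> Integer.parseInt("42")));
        }
        for (CompletableFuture<Integer> future : futures) {
          // join() rethrows any failure from the worker thread as a CompletionException.
          if (future.join() != 42) {
            throw new AssertionError("unexpected result from concurrent task");
          }
        }
        System.out.println("all " + futures.size() + " concurrent tasks succeeded");
      }
    }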
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index eedee2855e7d..b23dbfb9c05c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -30,16 +30,22 @@ import org.junit.jupiter.api.Test; import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -54,6 +60,7 @@ public class TestMultipartUploadComplete { private static final ObjectEndpoint REST = new ObjectEndpoint(); + private static final HttpHeaders HEADERS = mock(HttpHeaders.class); private static final OzoneClient CLIENT = new OzoneClientStub(); @BeforeAll @@ -61,18 +68,30 @@ public static void setUp() throws Exception { CLIENT.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET); - - HttpHeaders headers = mock(HttpHeaders.class); - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( + when(HEADERS.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( "STANDARD"); - REST.setHeaders(headers); + REST.setHeaders(HEADERS); REST.setClient(CLIENT); REST.setOzoneConfiguration(new OzoneConfiguration()); } private String initiateMultipartUpload(String key) throws IOException, OS3Exception { + return initiateMultipartUpload(key, Collections.emptyMap()); + } + + private String initiateMultipartUpload(String key, Map metadata) throws IOException, + OS3Exception { + MultivaluedMap metadataHeaders = new MultivaluedHashMap<>(); + + for (Map.Entry entry : metadata.entrySet()) { + metadataHeaders.computeIfAbsent(CUSTOM_METADATA_HEADER_PREFIX + entry.getKey(), k -> new ArrayList<>()) + .add(entry.getValue()); + } + + when(HEADERS.getRequestHeaders()).thenReturn(metadataHeaders); + Response response = REST.initializeMultipartUpload(OzoneConsts.S3_BUCKET, key); MultipartUploadInitiateResponse multipartUploadInitiateResponse = @@ -83,7 +102,6 @@ private String initiateMultipartUpload(String key) throws IOException, assertEquals(200, response.getStatus()); return uploadID; - } private Part uploadPart(String key, String uploadID, int partNumber, String @@ -93,9 +111,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; @@ -152,6 +170,37 @@ public void testMultipart() throws Exception { } + @Test + public void 
testMultipartWithCustomMetadata() throws Exception { + String key = UUID.randomUUID().toString(); + + Map customMetadata = new HashMap<>(); + customMetadata.put("custom-key1", "custom-value1"); + customMetadata.put("custom-key2", "custom-value2"); + + String uploadID = initiateMultipartUpload(key, customMetadata); + + List partsList = new ArrayList<>(); + + // Upload parts + String content = "Multipart Upload 1"; + int partNumber = 1; + + Part part1 = uploadPart(key, uploadID, partNumber, content); + partsList.add(part1); + + CompleteMultipartUploadRequest completeMultipartUploadRequest = new + CompleteMultipartUploadRequest(); + completeMultipartUploadRequest.setPartList(partsList); + + completeMultipartUpload(key, completeMultipartUploadRequest, uploadID); + + Response headResponse = REST.head(OzoneConsts.S3_BUCKET, key); + + assertEquals("custom-value1", headResponse.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + "custom-key1")); + assertEquals("custom-value2", headResponse.getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + "custom-key2")); + } + @Test public void testMultipartInvalidPartOrderError() throws Exception { @@ -202,7 +251,7 @@ public void testMultipartInvalidPartError() throws Exception { Part part1 = uploadPart(key, uploadID, partNumber, content); // Change part name. - part1.seteTag("random"); + part1.setETag("random"); partsList.add(part1); content = "Multipart Upload 2"; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index a773b8757981..d9595aeff796 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -32,6 +32,7 @@ import java.util.Map; import java.util.Scanner; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -91,7 +92,11 @@ public static void setUp() throws Exception { try (OutputStream stream = bucket .createKey(EXISTING_KEY, keyContent.length, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, - ReplicationFactor.THREE), new HashMap<>())) { + ReplicationFactor.THREE), + new HashMap() {{ + put(OzoneConsts.ETAG, DigestUtils.md5Hex(EXISTING_KEY_CONTENT)); + }} + )) { stream.write(keyContent); } @@ -327,9 +332,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; @@ -377,7 +382,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, assertNotNull(result.getETag()); assertNotNull(result.getLastModified()); Part part = new Part(); - part.seteTag(result.getETag()); + part.setETag(result.getETag()); part.setPartNumber(partNumber); return part; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java 
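The initiateMultipartUpload overload above turns a plain metadata map into custom-metadata request headers. A self-contained sketch of that conversion; the prefix value "x-amz-meta-" is an assumption about S3Consts.CUSTOM_METADATA_HEADER_PREFIX and is not taken from this patch:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import javax.ws.rs.core.MultivaluedHashMap;
    import javax.ws.rs.core.MultivaluedMap;

    // Illustrative conversion of user metadata into "x-amz-meta-*" request headers.
    public final class CustomMetadataHeadersSketch {
      private static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-"; // assumed value

      public static MultivaluedMap<String, String> toHeaders(Map<String, String> metadata) {
        MultivaluedMap<String, String> headers = new MultivaluedHashMap<>();
        for (Map.Entry<String, String> entry : metadata.entrySet()) {
          headers.computeIfAbsent(CUSTOM_METADATA_HEADER_PREFIX + entry.getKey(),
              k -> new ArrayList<>()).add(entry.getValue());
        }
        return headers;
      }

      public static void main(String[] args) {
        Map<String, String> metadata = new LinkedHashMap<>();
        metadata.put("custom-key1", "custom-value1");
        // Prints the single x-amz-meta-custom-key1 header entry.
        System.out.println(toHeaders(metadata));
      }
    }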
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index 91e29cadc85e..8cf8da95cf80 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -25,6 +25,7 @@ import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.Response; import javax.ws.rs.core.UriInfo; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.time.format.DateTimeFormatter; @@ -33,7 +34,6 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.commons.io.IOUtils; @@ -44,6 +44,9 @@ import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_COUNT_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.mockito.Mockito.doReturn; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -55,7 +58,10 @@ */ public class TestObjectGet { - public static final String CONTENT = "0123456789"; + private static final String CONTENT = "0123456789"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_NAME = "key1"; + private static final String KEY_WITH_TAG = "keyWithTag"; public static final String CONTENT_TYPE1 = "video/mp4"; public static final String CONTENT_TYPE2 = "text/html; charset=UTF-8"; public static final String CONTENT_LANGUAGE1 = "en-CA"; @@ -76,15 +82,10 @@ public class TestObjectGet { private ContainerRequestContext context; @BeforeEach - public void init() throws IOException { + public void init() throws OS3Exception, IOException { //GIVEN client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket("b1"); - OzoneBucket bucket = client.getObjectStore().getS3Bucket("b1"); - OzoneOutputStream keyStream = - bucket.createKey("key1", CONTENT.getBytes(UTF_8).length); - keyStream.write(CONTENT.getBytes(UTF_8)); - keyStream.close(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); rest = new ObjectEndpoint(); rest.setClient(client); @@ -92,6 +93,14 @@ public void init() throws IOException { headers = mock(HttpHeaders.class); rest.setHeaders(headers); + ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), + 1, null, body); + // Create a key with object tags + when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, body); + context = mock(ContainerRequestContext.class); when(context.getUriInfo()).thenReturn(mock(UriInfo.class)); when(context.getUriInfo().getQueryParameters()) @@ -102,12 +111,12 @@ public void init() throws IOException { @Test public void get() throws IOException, OS3Exception { //WHEN - Response response = rest.get("b1", "key1", 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); //THEN 
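Keys above are created with an x-amz-tagging header of the form "tag1=value1&tag2=value2", and the GET responses are then checked for a tag-count header. A minimal sketch of parsing that header form; the error messages here are illustrative, and the gateway's own validation is what the later TestObjectPut cases assert on:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative parser for "key1=value1&key2=value2" style object-tagging headers.
    public final class TaggingHeaderSketch {
      public static Map<String, String> parse(String header) {
        Map<String, String> tags = new LinkedHashMap<>();
        if (header == null || header.isEmpty()) {
          return tags;
        }
        for (String pair : header.split("&")) {
          String[] keyValue = pair.split("=", 2);
          if (keyValue.length != 2) {
            throw new IllegalArgumentException("Tag value not specified: " + pair);
          }
          if (tags.put(keyValue[0], keyValue[1]) != null) {
            throw new IllegalArgumentException("Duplicate tag key: " + keyValue[0]);
          }
        }
        return tags;
      }

      public static void main(String[] args) {
        // Two tags, so a response would carry a tag count of 2.
        System.out.println(parse("tag1=value1&tag2=value2").size());
      }
    }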
OzoneInputStream ozoneInputStream = - client.getObjectStore().getS3Bucket("b1") - .readKey("key1"); + client.getObjectStore().getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); @@ -118,13 +127,35 @@ public void get() throws IOException, OS3Exception { DateTimeFormatter.RFC_1123_DATE_TIME .parse(response.getHeaderString("Last-Modified")); + assertNull(response.getHeaderString(TAG_COUNT_HEADER)); + } + + @Test + public void getKeyWithTag() throws IOException, OS3Exception { + //WHEN + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null); + + //THEN + OzoneInputStream ozoneInputStream = + client.getObjectStore().getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); + String keyContent = + IOUtils.toString(ozoneInputStream, UTF_8); + + assertEquals(CONTENT, keyContent); + assertEquals("" + keyContent.length(), + response.getHeaderString("Content-Length")); + + DateTimeFormatter.RFC_1123_DATE_TIME + .parse(response.getHeaderString("Last-Modified")); + assertEquals("2", response.getHeaderString(TAG_COUNT_HEADER)); } @Test public void inheritRequestHeader() throws IOException, OS3Exception { setDefaultHeader(); - Response response = rest.get("b1", "key1", 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); assertEquals(CONTENT_TYPE1, response.getHeaderString("Content-Type")); @@ -157,7 +188,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception { when(context.getUriInfo().getQueryParameters()) .thenReturn(queryParameter); - Response response = rest.get("b1", "key1", 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); assertEquals(CONTENT_TYPE2, response.getHeaderString("Content-Type")); @@ -178,24 +209,26 @@ public void getRangeHeader() throws IOException, OS3Exception { Response response; when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0"); - response = rest.get("b1", "key1", 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); assertEquals("1", response.getHeaderString("Content-Length")); assertEquals(String.format("bytes 0-0/%s", CONTENT.length()), response.getHeaderString("Content-Range")); when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-"); - response = rest.get("b1", "key1", 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); assertEquals(String.valueOf(CONTENT.length()), response.getHeaderString("Content-Length")); assertEquals( String.format("bytes 0-%s/%s", CONTENT.length() - 1, CONTENT.length()), response.getHeaderString("Content-Range")); + + assertNull(response.getHeaderString(TAG_COUNT_HEADER)); } @Test public void getStatusCode() throws IOException, OS3Exception { Response response; - response = rest.get("b1", "key1", 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); assertEquals(response.getStatus(), Response.Status.OK.getStatusCode()); @@ -203,9 +236,10 @@ public void getStatusCode() throws IOException, OS3Exception { // The 206 (Partial Content) status code indicates that the server is // successfully fulfilling a range request for the target resource when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1"); - response = rest.get("b1", "key1", 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); assertEquals(response.getStatus(), Response.Status.PARTIAL_CONTENT.getStatusCode()); + assertNull(response.getHeaderString(TAG_COUNT_HEADER)); } private 
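The range tests above pin down the Content-Length and Content-Range values for "bytes=0-0" and the open-ended "bytes=0-". A small sketch of how those header values follow from the requested range and the object length (not the gateway's actual range handling):

    // Illustrative computation of the Content-Range header asserted in getRangeHeader.
    public final class ContentRangeSketch {
      // end < 0 stands for an open-ended range such as "bytes=0-".
      public static String contentRange(long start, long end, long objectLength) {
        long effectiveEnd = (end < 0) ? objectLength - 1 : Math.min(end, objectLength - 1);
        return String.format("bytes %d-%d/%d", start, effectiveEnd, objectLength);
      }

      public static void main(String[] args) {
        System.out.println(contentRange(0, 0, 10));   // bytes 0-0/10, Content-Length 1
        System.out.println(contentRange(0, -1, 10));  // bytes 0-9/10, Content-Length 10
      }
    }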
void setDefaultHeader() { @@ -227,17 +261,16 @@ private void setDefaultHeader() { public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash() throws IOException { // GIVEN - final String bucketName = "b1"; final String keyPath = "keyDir"; OzoneConfiguration config = new OzoneConfiguration(); config.set(OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED, "true"); rest.setOzoneConfiguration(config); - OzoneBucket bucket = client.getObjectStore().getS3Bucket(bucketName); + OzoneBucket bucket = client.getObjectStore().getS3Bucket(BUCKET_NAME); bucket.createDirectory(keyPath); // WHEN final OS3Exception ex = assertThrows(OS3Exception.class, - () -> rest.get(bucketName, keyPath, 0, null, 0, null)); + () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null)); // THEN assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index ae8279f25861..8cde144a3742 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -23,40 +23,66 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.Map; +import java.util.stream.Stream; +import java.io.OutputStream; +import java.security.MessageDigest; import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_TAG; +import static 
org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_DIRECTIVE_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_NUM_LIMIT; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_VALUE_LENGTH_LIMIT; import static org.apache.hadoop.ozone.s3.util.S3Utils.urlEncode; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -64,104 +90,105 @@ /** * Test put object. */ -public class TestObjectPut { - public static final String CONTENT = "0123456789"; - private String bucketName = "b1"; - private String keyName = "key=value/1"; - private String destBucket = "b2"; - private String destkey = "key=value/2"; - private String nonexist = "nonexist"; +class TestObjectPut { + private static final String CONTENT = "0123456789"; + private static final String FSO_BUCKET_NAME = "fso-bucket"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_NAME = "key=value/1"; + private static final String DEST_BUCKET_NAME = "b2"; + private static final String DEST_KEY = "key=value/2"; + private static final String NO_SUCH_BUCKET = "nonexist"; + private OzoneClient clientStub; private ObjectEndpoint objectEndpoint; + private HttpHeaders headers; + private OzoneBucket bucket; + private OzoneBucket fsoBucket; + + static Stream argumentsForPutObject() { + ReplicationConfig ratis3 = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE); + ECReplicationConfig ec = new ECReplicationConfig("rs-3-2-1024K"); + return Stream.of( + Arguments.of(0, ratis3), + Arguments.of(10, ratis3), + Arguments.of(0, ec), + Arguments.of(10, ec) + ); + } @BeforeEach - public void setup() throws IOException { + void setup() throws IOException { + OzoneConfiguration config = new OzoneConfiguration(); + //Create client stub and object store stub. 
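testPutObject is now parameterized over content length and replication config through a JUnit 5 @MethodSource. The generic shape of that pairing, with placeholder string arguments standing in for real ReplicationConfig instances:

    import java.util.stream.Stream;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.Arguments;
    import org.junit.jupiter.params.provider.MethodSource;
    import static org.junit.jupiter.api.Assertions.assertEquals;

    // Illustrative JUnit 5 skeleton: one provider supplies (length, replication) tuples,
    // and the test body runs once per tuple.
    class ParameterizedPutSketch {
      static Stream<Arguments> cases() {
        return Stream.of(
            Arguments.of(0, "RATIS/THREE"),
            Arguments.of(10, "RATIS/THREE"),
            Arguments.of(0, "rs-3-2-1024K"),
            Arguments.of(10, "rs-3-2-1024K"));
      }

      @ParameterizedTest
      @MethodSource("cases")
      void roundTrip(int length, String replication) {
        // The real test writes `length` random bytes under the given replication config
        // and reads them back; this stand-in only shows the parameter plumbing.
        String content = new String(new char[length]).replace('\0', 'x');
        assertEquals(length, content.length());
      }
    }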
clientStub = new OzoneClientStub(); // Create bucket - clientStub.getObjectStore().createS3Bucket(bucketName); - clientStub.getObjectStore().createS3Bucket(destBucket); + clientStub.getObjectStore().createS3Bucket(BUCKET_NAME); + bucket = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME); + clientStub.getObjectStore().createS3Bucket(DEST_BUCKET_NAME); // Create PutObject and setClient to OzoneClientStub - objectEndpoint = new ObjectEndpoint(); + objectEndpoint = spy(new ObjectEndpoint()); objectEndpoint.setClient(clientStub); - objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + objectEndpoint.setOzoneConfiguration(config); + + headers = mock(HttpHeaders.class); + objectEndpoint.setHeaders(headers); + + String volumeName = config.get(OzoneConfigKeys.OZONE_S3_VOLUME_NAME, + OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT); + OzoneVolume volume = clientStub.getObjectStore().getVolume(volumeName); + BucketArgs fsoBucketArgs = BucketArgs.newBuilder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .build(); + volume.createBucket(FSO_BUCKET_NAME, fsoBucketArgs); + fsoBucket = volume.getBucket(FSO_BUCKET_NAME); } - @Test - public void testPutObject() throws IOException, OS3Exception { + @ParameterizedTest + @MethodSource("argumentsForPutObject") + void testPutObject(int length, ReplicationConfig replication) throws IOException, OS3Exception { //GIVEN - HttpHeaders headers = mock(HttpHeaders.class); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); + final String content = RandomStringUtils.randomAlphanumeric(length); + ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, body); //THEN - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, UTF_8); - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); - } - @Test - public void testPutObjectWithECReplicationConfig() - throws IOException, OS3Exception { - //GIVEN - HttpHeaders headers = mock(HttpHeaders.class); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - ECReplicationConfig ecReplicationConfig = - new ECReplicationConfig("rs-3-2-1024K"); - clientStub.getObjectStore().getS3Bucket(bucketName) - .setReplicationConfig(ecReplicationConfig); - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - - assertEquals(ecReplicationConfig, - clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName) - .getReplicationConfig()); - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, UTF_8); - - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); + String keyContent; + try (InputStream input = bucket.readKey(KEY_NAME)) { + keyContent = IOUtils.toString(input, UTF_8); + } + assertEquals(content, keyContent); + + OzoneKeyDetails keyDetails = bucket.getKey(KEY_NAME); + assertEquals(replication, keyDetails.getReplicationConfig()); + assertNotNull(keyDetails.getMetadata()); + 
assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + assertThat(keyDetails.getTags()).isEmpty(); } @Test - public void testPutObjectContentLength() throws IOException, OS3Exception { + void testPutObjectContentLength() throws IOException, OS3Exception { // The contentLength specified when creating the Key should be the same as // the Content-Length, the key Commit will compare the Content-Length with // the actual length of the data written. - HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); long dataSize = CONTENT.length(); - objectEndpoint.put(bucketName, keyName, dataSize, 0, null, body); - assertEquals(dataSize, getKeyDataSize(keyName)); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, body); + assertEquals(dataSize, getKeyDataSize()); } @Test - public void testPutObjectContentLengthForStreaming() + void testPutObjectContentLengthForStreaming() throws IOException, OS3Exception { - HttpHeaders headers = mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); - String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" @@ -172,22 +199,142 @@ public void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(bucketName, keyName, chunkedContent.length(), 0, null, + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); - assertEquals(15, getKeyDataSize(keyName)); + assertEquals(15, getKeyDataSize()); + } + + @Test + public void testPutObjectWithTags() throws IOException, OS3Exception { + HttpHeaders headersWithTags = Mockito.mock(HttpHeaders.class); + when(headersWithTags.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + objectEndpoint.setHeaders(headersWithTags); + + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), + 1, null, body); + + assertEquals(200, response.getStatus()); + + OzoneKeyDetails keyDetails = + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + Map tags = keyDetails.getTags(); + assertEquals(2, tags.size()); + assertEquals("value1", tags.get("tag1")); + assertEquals("value2", tags.get("tag2")); } - private long getKeyDataSize(String key) throws IOException { - return clientStub.getObjectStore().getS3Bucket(bucketName) - .getKey(key).getDataSize(); + @Test + public void testPutObjectWithOnlyTagKey() throws Exception { + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + HttpHeaders headerWithOnlyTagKey = Mockito.mock(HttpHeaders.class); + // Try to send with only the key (no value) + when(headerWithOnlyTagKey.getHeaderString(TAG_HEADER)).thenReturn("tag1"); + objectEndpoint.setHeaders(headerWithOnlyTagKey); + + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), + 1, null, body); + fail("request with invalid query param should fail"); + } catch (OS3Exception ex) { + assertEquals(INVALID_TAG.getCode(), ex.getCode()); + assertThat(ex.getErrorMessage()).contains("Some tag values are not specified"); + assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); + } } @Test - public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { - //GIVEN - HttpHeaders headers = 
mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); + public void testPutObjectWithDuplicateTagKey() throws Exception { + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + HttpHeaders headersWithDuplicateTagKey = Mockito.mock(HttpHeaders.class); + when(headersWithDuplicateTagKey.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag1=value2"); + objectEndpoint.setHeaders(headersWithDuplicateTagKey); + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), + 1, null, body); + fail("request with duplicate tag key should fail"); + } catch (OS3Exception ex) { + assertEquals(INVALID_TAG.getCode(), ex.getCode()); + assertThat(ex.getErrorMessage()).contains("There are tags with duplicate tag keys"); + assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); + } + } + + @Test + public void testPutObjectWithLongTagKey() throws Exception { + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + HttpHeaders headersWithLongTagKey = Mockito.mock(HttpHeaders.class); + String longTagKey = StringUtils.repeat('k', TAG_KEY_LENGTH_LIMIT + 1); + when(headersWithLongTagKey.getHeaderString(TAG_HEADER)).thenReturn(longTagKey + "=value1"); + objectEndpoint.setHeaders(headersWithLongTagKey); + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), + 1, null, body); + fail("request with tag key exceeding the length limit should fail"); + } catch (OS3Exception ex) { + assertEquals(INVALID_TAG.getCode(), ex.getCode()); + assertThat(ex.getErrorMessage()).contains("The tag key exceeds the maximum length"); + assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); + } + } + @Test + public void testPutObjectWithLongTagValue() throws Exception { + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + HttpHeaders headersWithLongTagValue = Mockito.mock(HttpHeaders.class); + objectEndpoint.setHeaders(headersWithLongTagValue); + String longTagValue = StringUtils.repeat('v', TAG_VALUE_LENGTH_LIMIT + 1); + when(headersWithLongTagValue.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue); + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), + 1, null, body); + fail("request with tag value exceeding the length limit should fail"); + } catch (OS3Exception ex) { + assertEquals(INVALID_TAG.getCode(), ex.getCode()); + assertThat(ex.getErrorMessage()).contains("The tag value exceeds the maximum length"); + assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); + } + } + + @Test + public void testPutObjectWithTooManyTags() throws Exception { + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + HttpHeaders headersWithTooManyTags = Mockito.mock(HttpHeaders.class); + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < TAG_NUM_LIMIT + 1; i++) { + sb.append(String.format("tag%d=value%d", i, i)); + if (i < TAG_NUM_LIMIT) { + sb.append("&"); + } + } + when(headersWithTooManyTags.getHeaderString(TAG_HEADER)).thenReturn(sb.toString()); + objectEndpoint.setHeaders(headersWithTooManyTags); + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), + 1, null, body); + fail("request with number of tags exceeding limit should fail"); + } catch (OS3Exception ex) { + assertEquals(INVALID_TAG.getCode(), ex.getCode()); + assertThat(ex.getErrorMessage()).contains("exceeded the maximum number of tags"); + assertEquals(INVALID_TAG.getHttpCode(), ex.getHttpCode()); + } + } + + private long getKeyDataSize() throws IOException { + return 
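The failure cases above cover a missing tag value, a duplicate key, over-long keys and values, and too many tags. A sketch of the corresponding limit checks; the numeric limits (10 tags, 128-character keys, 256-character values) are assumptions based on S3's documented tagging restrictions, and the authoritative values are the S3Consts constants the tests reference:

    import java.util.Collections;
    import java.util.Map;

    // Illustrative tag-limit validation; the limits are assumed, see the note above.
    public final class TagLimitSketch {
      static final int TAG_NUM_LIMIT = 10;
      static final int TAG_KEY_LENGTH_LIMIT = 128;
      static final int TAG_VALUE_LENGTH_LIMIT = 256;

      public static void validate(Map<String, String> tags) {
        if (tags.size() > TAG_NUM_LIMIT) {
          throw new IllegalArgumentException("Exceeded the maximum number of tags");
        }
        for (Map.Entry<String, String> entry : tags.entrySet()) {
          if (entry.getKey().length() > TAG_KEY_LENGTH_LIMIT) {
            throw new IllegalArgumentException("The tag key exceeds the maximum length");
          }
          if (entry.getValue().length() > TAG_VALUE_LENGTH_LIMIT) {
            throw new IllegalArgumentException("The tag value exceeds the maximum length");
          }
        }
      }

      public static void main(String[] args) {
        validate(Collections.singletonMap("tag1", "value1")); // passes silently
      }
    }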
clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_NAME).getDataSize(); + } + + @Test + void testPutObjectWithSignedChunks() throws IOException, OS3Exception { + //GIVEN String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" @@ -199,202 +346,389 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { .thenReturn("15"); //WHEN - Response response = objectEndpoint.put(bucketName, keyName, + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 1, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, response.getStatus()); assertEquals("1234567890abcde", keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); } @Test - public void testCopyObject() throws IOException, OS3Exception { + public void testPutObjectMessageDigestResetDuringException() throws OS3Exception { + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // For example, EOFException during put-object due to client cancelling the operation before it completes + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT + .length(), 1, null, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + + @Test + void testCopyObject() throws IOException, OS3Exception { // Put object in to source bucket - HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; - Response response = objectEndpoint.put(bucketName, keyName, + // Add some custom metadata + MultivaluedMap metadataHeaders = new MultivaluedHashMap<>(); + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1", "custom-value-1"); + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2", "custom-value-2"); + when(headers.getRequestHeaders()).thenReturn(metadataHeaders); + // Add COPY metadata directive (default) + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); + + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() - .getS3Bucket(bucketName) - .readKey(keyName); + .getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, response.getStatus()); assertEquals(CONTENT, 
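The digest-reset tests above depend on Mockito's static mocking: while the try-with-resources scope is open, IOUtils.copyLarge is stubbed to throw, and the real behaviour returns when the scope closes. A compact sketch of just that mechanism (it assumes a Mockito version with MockedStatic support is on the test classpath):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.commons.io.IOUtils;
    import org.mockito.MockedStatic;
    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.mockStatic;

    // Illustrative MockedStatic usage: force IOUtils.copyLarge to fail so clean-up paths
    // (such as MessageDigest.reset()) can be verified by the caller's test.
    public final class MockedStaticSketch {
      public static void main(String[] args) {
        try (MockedStatic<IOUtils> mocked = mockStatic(IOUtils.class)) {
          mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class)))
              .thenThrow(IOException.class);
          // Code exercised here that calls IOUtils.copyLarge(in, out) now gets an IOException.
        }
        // Outside the block the original static method is active again.
      }
    }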
keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + assertThat(keyDetails.getMetadata().get("custom-key-1")).isEqualTo("custom-value-1"); + assertThat(keyDetails.getMetadata().get("custom-key-2")).isEqualTo("custom-value-2"); + + String sourceETag = keyDetails.getMetadata().get(OzoneConsts.ETAG); + // This will be ignored since the copy directive is COPY + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-3", "custom-value-3"); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); - response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, + response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body); // Check destination key and response - ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket) - .readKey(destkey); + ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) + .readKey(DEST_KEY); keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails sourceKeyDetails = clientStub.getObjectStore() + .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + // Source key eTag should remain unchanged and the dest key should have + // the same Etag since the key content is the same + assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertThat(destKeyDetails.getMetadata().get("custom-key-1")).isEqualTo("custom-value-1"); + assertThat(destKeyDetails.getMetadata().get("custom-key-2")).isEqualTo("custom-value-2"); + assertThat(destKeyDetails.getMetadata().containsKey("custom-key-3")).isFalse(); + + // Now use REPLACE metadata directive (default) and remove some custom metadata used in the source key + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("REPLACE"); + metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1"); + metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); + + response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, + null, body); - // source and dest same + ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) + .readKey(DEST_KEY); + + keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + sourceKeyDetails = clientStub.getObjectStore() + .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); + + assertEquals(200, response.getStatus()); + assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + // Source key eTag should remain unchanged and the dest key should have + // the same Etag since the key content is the same + assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertThat(destKeyDetails.getMetadata().containsKey("custom-key-1")).isFalse(); + 
assertThat(destKeyDetails.getMetadata().containsKey("custom-key-2")).isFalse(); + assertThat(destKeyDetails.getMetadata().get("custom-key-3")).isEqualTo("custom-value-3"); + + + // wrong copy metadata directive + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body), + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body), + "test copy object failed"); + assertThat(e.getHttpCode()).isEqualTo(400); + assertThat(e.getCode()).isEqualTo("InvalidArgument"); + assertThat(e.getErrorMessage()).contains("The metadata copy directive specified is invalid"); + + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); + + // source and dest same + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); // source bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(destBucket, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(nonexist)); + BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", keyName, CONTENT.length(), 1, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); } @Test - public void testInvalidStorageType() throws IOException { - HttpHeaders headers = mock(HttpHeaders.class); + public void testCopyObjectMessageDigestResetDuringException() throws IOException, OS3Exception { + // Put object in to source bucket + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, + CONTENT.length(), 1, null, body); + + 
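The copy tests above establish the directive semantics: COPY (the default) carries the source object's metadata or tags over, REPLACE takes only what the copy request supplies, and any other value is rejected. A self-contained sketch of that resolution logic, independent of the gateway's actual implementation:

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative resolution of an S3 copy directive for metadata or tags.
    public final class CopyDirectiveSketch {
      enum Directive { COPY, REPLACE }

      public static Map<String, String> resolve(String directiveHeader,
          Map<String, String> source, Map<String, String> requested) {
        Directive directive;
        try {
          directive = directiveHeader == null
              ? Directive.COPY : Directive.valueOf(directiveHeader);
        } catch (IllegalArgumentException e) {
          throw new IllegalArgumentException("The copy directive specified is invalid");
        }
        return new HashMap<>(directive == Directive.COPY ? source : requested);
      }

      public static void main(String[] args) {
        Map<String, String> source = new HashMap<>();
        source.put("custom-key-1", "custom-value-1");
        Map<String, String> requested = new HashMap<>();
        requested.put("custom-key-3", "custom-value-3");
        System.out.println(resolve("COPY", source, requested));    // keeps the source entries
        System.out.println(resolve("REPLACE", source, requested)); // keeps only the request entries
      }
    }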
OzoneInputStream ozoneInputStream = clientStub.getObjectStore() + .getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); + + String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + + assertEquals(200, response.getStatus()); + assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // Add the mocked methods only during the copy request + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + + // Add copy header, and then call put + when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); + + try { + objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, + null, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + + @Test + public void testCopyObjectWithTags() throws IOException, OS3Exception { + // Put object in to source bucket + HttpHeaders headersForPut = Mockito.mock(HttpHeaders.class); + when(headersForPut.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + objectEndpoint.setHeaders(headersForPut); + + String sourceKeyName = "sourceKey"; + + Response putResponse = objectEndpoint.put(BUCKET_NAME, sourceKeyName, + CONTENT.length(), 1, null, body); + OzoneKeyDetails keyDetails = + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(sourceKeyName); + + assertEquals(200, putResponse.getStatus()); + Map tags = keyDetails.getTags(); + assertEquals(2, tags.size()); + assertEquals("value1", tags.get("tag1")); + assertEquals("value2", tags.get("tag2")); + + // Copy object without x-amz-tagging-directive (default to COPY) + String destKey = "key=value/2"; + HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); + when(headersForCopy.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( + BUCKET_NAME + "/" + urlEncode(sourceKeyName)); + + objectEndpoint.setHeaders(headersForCopy); + Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + + OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); + + assertEquals(200, copyResponse.getStatus()); + Map destKeyTags = destKeyDetails.getTags(); + + // Since the default directive is COPY, it will copy the source key's tags + // to the destination key + assertEquals(2, destKeyTags.size()); + assertEquals("value1", destKeyTags.get("tag1")); + assertEquals("value2", destKeyTags.get("tag2")); + + // Copy object with x-amz-tagging-directive = COPY + when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("COPY"); + + // With x-amz-tagging-directive = COPY with a different x-amz-tagging + when(headersForCopy.getHeaderString(TAG_HEADER)).thenReturn("tag3=value3"); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + assertEquals(200, copyResponse.getStatus()); + + 
destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); + destKeyTags = destKeyDetails.getTags(); + + // Since the x-amz-tagging-directive is COPY, we ignore the x-amz-tagging + // header + assertEquals(2, destKeyTags.size()); + assertEquals("value1", destKeyTags.get("tag1")); + assertEquals("value2", destKeyTags.get("tag2")); + + // Copy object with x-amz-tagging-directive = REPLACE + when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("REPLACE"); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + assertEquals(200, copyResponse.getStatus()); + + destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); + destKeyTags = destKeyDetails.getTags(); + + // Since the x-amz-tagging-directive is REPLACE, we replace the source key + // tags with the one specified in the copy request + assertEquals(1, destKeyTags.size()); + assertEquals("value3", destKeyTags.get("tag3")); + assertThat(destKeyTags).doesNotContainKeys("tag1", "tag2"); + } + + @Test + public void testCopyObjectWithInvalidTagCopyDirective() throws Exception { + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Copy object with invalid x-amz-tagging-directive + HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); + when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("INVALID"); + try { + objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, body); + } catch (OS3Exception ex) { + assertEquals(INVALID_ARGUMENT.getCode(), ex.getCode()); + assertThat(ex.getErrorMessage()).contains("The tagging copy directive specified is invalid"); + assertEquals(INVALID_ARGUMENT.getHttpCode(), ex.getHttpCode()); + } + } + + @Test + void testInvalidStorageType() { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body)); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); } @Test - public void testEmptyStorageType() throws IOException, OS3Exception { - HttpHeaders headers = mock(HttpHeaders.class); + void testEmptyStorageType() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - objectEndpoint.put(bucketName, keyName, CONTENT + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT .length(), 1, null, body); OzoneKeyDetails key = - clientStub.getObjectStore().getS3Bucket(bucketName) - .getKey(keyName); - + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_NAME); //default type is set - assertEquals(ReplicationType.RATIS, key.getReplicationType()); + assertEquals( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + key.getReplicationConfig()); } @Test - public void testDirectoryCreation() throws IOException, + void testDirectoryCreation() throws IOException, OS3Exception { // GIVEN - final String path = "dir"; - final long length = 0L; - final int partNumber = 0; - final String uploadId = ""; - 
final InputStream body = null; - final HttpHeaders headers = mock(HttpHeaders.class); - final ObjectEndpoint objEndpoint = new ObjectEndpoint(); - objEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - objEndpoint.setHeaders(headers); - final OzoneClient client = mock(OzoneClient.class); - objEndpoint.setClient(client); - final ObjectStore objectStore = mock(ObjectStore.class); - final OzoneVolume volume = mock(OzoneVolume.class); - final OzoneBucket bucket = mock(OzoneBucket.class); - final ClientProtocol protocol = mock(ClientProtocol.class); + final String path = "dir/"; // WHEN - when(client.getObjectStore()).thenReturn(objectStore); - when(client.getObjectStore().getS3Volume()).thenReturn(volume); - when(volume.getBucket(bucketName)).thenReturn(bucket); - when(bucket.getBucketLayout()) - .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED); - when(client.getProxy()).thenReturn(protocol); - final Response response = objEndpoint.put(bucketName, path, length, - partNumber, uploadId, body); + try (Response response = objectEndpoint.put(fsoBucket.getName(), path, + 0L, 0, "", null)) { + assertEquals(HttpStatus.SC_OK, response.getStatus()); + } // THEN - assertEquals(HttpStatus.SC_OK, response.getStatus()); - verify(protocol).createDirectory(any(), eq(bucketName), eq(path)); + OzoneKeyDetails key = fsoBucket.getKey(path); + assertThat(key.isFile()).as("directory").isFalse(); } @Test - public void testDirectoryCreationOverFile() throws IOException { + void testDirectoryCreationOverFile() throws IOException, OS3Exception { // GIVEN final String path = "key"; - final long length = 0L; - final int partNumber = 0; - final String uploadId = ""; final ByteArrayInputStream body = - new ByteArrayInputStream("content".getBytes(UTF_8)); - final HttpHeaders headers = mock(HttpHeaders.class); - final ObjectEndpoint objEndpoint = new ObjectEndpoint(); - objEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - objEndpoint.setHeaders(headers); - final OzoneClient client = mock(OzoneClient.class); - objEndpoint.setClient(client); - final ObjectStore objectStore = mock(ObjectStore.class); - final OzoneVolume volume = mock(OzoneVolume.class); - final OzoneBucket bucket = mock(OzoneBucket.class); - final ClientProtocol protocol = mock(ClientProtocol.class); + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body); // WHEN - when(client.getObjectStore()).thenReturn(objectStore); - when(client.getObjectStore().getS3Volume()).thenReturn(volume); - when(volume.getBucket(bucketName)).thenReturn(bucket); - when(bucket.getBucketLayout()) - .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED); - when(client.getProxy()).thenReturn(protocol); - doThrow(new OMException(OMException.ResultCodes.FILE_ALREADY_EXISTS)) - .when(protocol) - .createDirectory(any(), any(), any()); + final OS3Exception exception = assertThrows(OS3Exception.class, + () -> objectEndpoint + .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null) + .close()); // THEN - final OS3Exception exception = assertThrows(OS3Exception.class, - () -> objEndpoint - .put(bucketName, path, length, partNumber, uploadId, body)); - assertEquals("Conflict", exception.getCode()); - assertEquals(409, exception.getHttpCode()); - verify(protocol, times(1)).createDirectory(any(), any(), any()); + assertEquals(S3ErrorTable.NO_OVERWRITE.getCode(), exception.getCode()); + assertEquals(S3ErrorTable.NO_OVERWRITE.getHttpCode(), exception.getHttpCode()); } } diff --git 
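The rewritten directory tests create keys directly through the endpoint against a real FILE_SYSTEM_OPTIMIZED bucket stub instead of mocking ClientProtocol. The convention they rely on, stated as a tiny sketch (an interpretation of the tests, not the gateway's code): a zero-length PUT whose key ends in a slash is a directory request, and such a request over an existing file must fail with NO_OVERWRITE rather than replace it.

    // Illustrative predicate only; the real decision is made inside ObjectEndpoint.
    public final class DirectoryKeySketch {
      public static boolean isDirectoryPut(String keyName, long contentLength) {
        return contentLength == 0 && keyName.endsWith("/");
      }

      public static void main(String[] args) {
        System.out.println(isDirectoryPut("dir/", 0));  // true: create a directory
        System.out.println(isDirectoryPut("key", 10));  // false: regular object upload
      }
    }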
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 90d490dea0b6..aecc56fe172b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; @@ -28,12 +29,16 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.mockito.MockedStatic; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.MessageDigest; import java.util.UUID; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; @@ -44,7 +49,13 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; /** @@ -90,7 +101,7 @@ public void testPartUpload() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -112,16 +123,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. 
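As the comment above notes, re-uploading the same part number replaces the previously recorded part, so its ETag tracks the new content. A toy model of that bookkeeping, using commons-codec's DigestUtils purely for illustration:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.commons.codec.digest.DigestUtils;

    // Illustrative model: the part map is keyed by part number, so a re-upload overwrites
    // the earlier entry and the stored ETag changes with the content.
    public final class PartOverrideSketch {
      public static void main(String[] args) {
        Map<Integer, String> partETags = new HashMap<>();
        partETags.put(1, DigestUtils.md5Hex("Multipart Upload"));
        String firstETag = partETags.get(1);
        partETags.put(1, DigestUtils.md5Hex("Multipart Upload Changed"));
        System.out.println(!firstETag.equals(partETags.get(1))); // true: the ETag changed
      }
    }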
content = "Multipart Upload Changed"; response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } @@ -194,6 +205,53 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { assertContentLength(uploadID, keyName, content.length()); } + @Test + public void testPartUploadMessageDigestResetDuringException() throws IOException, OS3Exception { + OzoneClient clientStub = new OzoneClientStub(); + clientStub.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET); + + + HttpHeaders headers = mock(HttpHeaders.class); + when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( + "STANDARD"); + + ObjectEndpoint objectEndpoint = spy(new ObjectEndpoint()); + + objectEndpoint.setHeaders(headers); + objectEndpoint.setClient(clientStub); + objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + + Response response = objectEndpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + OzoneConsts.KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); + + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // Add the mocked methods only during the copy request + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + + String content = "Multipart Upload"; + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + try { + objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + private void assertContentLength(String uploadID, String key, long contentLength) throws IOException { OzoneMultipartUploadPartListParts parts = diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 787aa6e8777a..28ce32e74707 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; @@ -67,7 +68,7 @@ public static void setUp() throws Exception { REST.setClient(client); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + 
conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); REST.setOzoneConfiguration(conf); REST.init(); @@ -95,7 +96,7 @@ public void testPartUpload() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -116,16 +117,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index d891573d5f13..04551ac7cc43 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -45,6 +45,7 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.anyMap; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.isNull; @@ -248,7 +249,7 @@ public void testPutKey() throws IOException { when(objectStore.getS3Volume()).thenReturn(volume); when(volume.getBucket("bucketName")).thenReturn(bucket); doThrow(exception).when(clientProtocol).createKey( - anyString(), anyString(), anyString(), anyLong(), any(), any()); + anyString(), anyString(), anyString(), anyLong(), any(), anyMap(), anyMap()); ObjectEndpoint objectEndpoint = new ObjectEndpoint(); objectEndpoint.setClient(client); objectEndpoint.setHeaders(headers); @@ -278,7 +279,7 @@ public void testDeleteKey() throws IOException { @Test public void testMultiUploadKey() throws IOException { when(objectStore.getS3Bucket(anyString())).thenReturn(bucket); - doThrow(exception).when(bucket).initiateMultipartUpload(anyString(), any()); + doThrow(exception).when(bucket).initiateMultipartUpload(anyString(), any(), anyMap(), anyMap()); ObjectEndpoint objectEndpoint = new ObjectEndpoint(); objectEndpoint.setClient(client); objectEndpoint.setHeaders(headers); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index f92496249e20..d988b4302308 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -81,7 +81,7 @@ public static void setUp() throws Exception { REST.setClient(client); OzoneConfiguration conf = new 
OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setStorageSize(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, 1, StorageUnit.BYTES); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java index 0585fea000c9..3e7214ce988b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java @@ -17,18 +17,16 @@ */ package org.apache.hadoop.ozone.admin.nssummary; -import com.google.gson.internal.LinkedTreeMap; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.shell.ListOptions; import picocli.CommandLine; -import java.util.ArrayList; -import java.util.HashMap; import java.util.concurrent.Callable; - +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.parseInputPath; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; @@ -101,21 +99,20 @@ public Void call() throws Exception { return null; } - HashMap duResponse = getResponseMap(response); + JsonNode duResponse = JsonUtils.readTree(response); - if (duResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(duResponse.path("status").asText(""))) { printPathNotFound(); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } - long totalSize = (long)(double)duResponse.get("size"); - + long totalSize = duResponse.path("size").asLong(-1); if (!noHeader) { printWithUnderline("Path", false); printKVSeparator(); - System.out.println(duResponse.get("path")); + System.out.println(duResponse.path("path").asText("")); printWithUnderline("Total Size", false); printKVSeparator(); @@ -124,11 +121,11 @@ public Void call() throws Exception { if (withReplica) { printWithUnderline("Total Disk Usage", false); printKVSeparator(); - long du = (long)(double)duResponse.get("sizeWithReplica"); + long du = duResponse.path("sizeWithReplica").asLong(-1); System.out.println(FileUtils.byteCountToDisplaySize(du)); } - long sizeDirectKey = (long)(double)duResponse.get("sizeDirectKey"); + long sizeDirectKey = duResponse.path("sizeDirectKey").asLong(-1); if (!listFiles && sizeDirectKey != -1) { printWithUnderline("Size of Direct Keys", false); printKVSeparator(); @@ -137,7 +134,7 @@ public Void call() throws Exception { printNewLines(1); } - if ((double)duResponse.get("subPathCount") == 0) { + if (duResponse.path("subPathCount").asInt(-1) == 0) { if (totalSize == 0) { // the object is empty System.out.println("The object is empty.\n" + @@ -160,20 +157,19 @@ public Void call() throws Exception { seekStr = ""; } - ArrayList duData = (ArrayList)duResponse.get("subPaths"); + ArrayNode subPaths = (ArrayNode) duResponse.path("subPaths"); int cnt = 0; - for (int i = 0; i < duData.size(); ++i) { + 
for (JsonNode subPathDU : subPaths) { if (cnt >= limit) { break; } - LinkedTreeMap subPathDU = (LinkedTreeMap) duData.get(i); - String subPath = subPathDU.get("path").toString(); + String subPath = subPathDU.path("path").asText(""); // differentiate key from other types - if (!(boolean)subPathDU.get("isKey")) { + if (!subPathDU.path("isKey").asBoolean(false)) { subPath += OM_KEY_PREFIX; } - long size = (long)(double)subPathDU.get("size"); - long sizeWithReplica = (long)(double)subPathDU.get("sizeWithReplica"); + long size = subPathDU.path("size").asLong(-1); + long sizeWithReplica = subPathDU.path("sizeWithReplica").asLong(-1); if (subPath.startsWith(seekStr)) { printDURow(subPath, size, sizeWithReplica); ++cnt; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java index f74ee109504c..0af263dbe31d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.ArrayList; -import java.util.HashMap; import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printBucketReminder; @@ -73,11 +72,11 @@ public Void call() throws Exception { printNewLines(1); return null; } - HashMap distResponse = getResponseMap(response); + JsonNode distResponse = JsonUtils.readTree(response); - if (distResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(distResponse.path("status").asText())) { printPathNotFound(); - } else if (distResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { + } else if ("TYPE_NOT_APPLICABLE".equals(distResponse.path("status").asText())) { printTypeNA("File Size Distribution"); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -85,11 +84,11 @@ public Void call() throws Exception { } printWithUnderline("File Size Distribution", true); - ArrayList fileSizeDist = (ArrayList) distResponse.get("dist"); + JsonNode fileSizeDist = distResponse.path("dist"); double sum = 0; for (int i = 0; i < fileSizeDist.size(); ++i) { - sum += (double) fileSizeDist.get(i); + sum += fileSizeDist.get(i).asDouble(); } if (sum == 0) { printSpaces(2); @@ -100,11 +99,11 @@ public Void call() throws Exception { } for (int i = 0; i < fileSizeDist.size(); ++i) { - if ((double)fileSizeDist.get(i) == 0) { + if (fileSizeDist.get(i).asDouble() == 0) { continue; } String label = convertBinIndexToReadableRange(i); - printDistRow(label, (double) fileSizeDist.get(i), sum); + printDistRow(label, fileSizeDist.get(i).asDouble(), sum); } } printNewLines(1); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java index 729aa20c5ce3..9aff2e9999ad 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.admin.nssummary; -import com.google.gson.Gson; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -31,7 +30,6 @@ import java.net.HttpURLConnection; import java.net.URL; import java.nio.charset.StandardCharsets; -import java.util.HashMap; import static java.net.HttpURLConnection.HTTP_CREATED; import static java.net.HttpURLConnection.HTTP_OK; @@ -107,10 +105,6 @@ public static String makeHttpCall(StringBuffer url, String path, } } - public static HashMap getResponseMap(String response) { - return new Gson().fromJson(response, HashMap.class); - } - public static void printNewLines(int cnt) { for (int i = 0; i < cnt; ++i) { System.out.println(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java index 113193c929b4..1e4e719baf83 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.HashMap; import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printBucketReminder; @@ -73,11 +73,11 @@ public Void call() throws Exception { return null; } - HashMap quotaResponse = getResponseMap(response); + JsonNode quotaResponse = JsonUtils.readTree(response); - if (quotaResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(quotaResponse.path("status").asText())) { printPathNotFound(); - } else if (quotaResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { + } else if ("TYPE_NOT_APPLICABLE".equals(quotaResponse.path("status").asText())) { printTypeNA("Quota"); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -85,8 +85,10 @@ public Void call() throws Exception { } printWithUnderline("Quota", true); - long quotaAllowed = (long)(double)quotaResponse.get("allowed"); - long quotaUsed = (long)(double)quotaResponse.get("used"); + + long quotaAllowed = quotaResponse.get("allowed").asLong(); + long quotaUsed = quotaResponse.get("used").asLong(); + printSpaces(2); System.out.print("Allowed"); printKVSeparator(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java index 9180274b9c70..d2060b8db526 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.HashMap; import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.parseInputPath; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; @@ -71,9 +71,9 @@ public Void call() throws Exception { printNewLines(1); return null; } - HashMap summaryResponse = getResponseMap(response); + JsonNode summaryResponse = JsonUtils.readTree(response); - if (summaryResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(summaryResponse.path("status").asText())) { printPathNotFound(); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -83,10 +83,11 @@ public Void call() throws Exception { printWithUnderline("Entity Type", false); printKVSeparator(); System.out.println(summaryResponse.get("type")); - int numVol = ((Double) summaryResponse.get("numVolume")).intValue(); - int numBucket = ((Double) summaryResponse.get("numBucket")).intValue(); - int numDir = ((Double) summaryResponse.get("numDir")).intValue(); - int numKey = ((Double) summaryResponse.get("numKey")).intValue(); + + int numVol = summaryResponse.path("numVolume").asInt(-1); + int numBucket = summaryResponse.path("numBucket").asInt(-1); + int numDir = summaryResponse.path("numDir").asInt(-1); + int numKey = summaryResponse.path("numKey").asInt(-1); if (numVol != -1) { printWithUnderline("Volumes", false); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java index 9ede45a80aa6..723a4ec4022e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java @@ -28,6 +28,7 @@ import picocli.CommandLine; import java.io.IOException; +import java.time.Instant; import java.util.List; import java.util.concurrent.Callable; @@ -66,6 +67,16 @@ public class ListOpenFilesSubCommand implements Callable { description = "Format output as JSON") private boolean json; + @CommandLine.Option(names = { "--show-deleted" }, + defaultValue = "false", + description = "Whether to show deleted open keys") + private boolean showDeleted; + + @CommandLine.Option(names = { "--show-overwritten" }, + defaultValue = "false", + description = "Whether to show overwritten open keys") + private boolean showOverwritten; + // Conforms to ListOptions, but not all in ListOptions applies here thus // not using that directly @CommandLine.Option( @@ -95,7 +106,7 @@ public class ListOpenFilesSubCommand implements Callable { public Void call() throws Exception { if (StringUtils.isEmpty(omServiceId) && StringUtils.isEmpty(omHost)) { - System.err.println("Error: Please specify -id or -host"); + System.err.println("Error: Please specify --service-id 
or --service-host"); return null; } @@ -105,6 +116,12 @@ public Void call() throws Exception { ListOpenFilesResult res = ozoneManagerClient.listOpenFiles(pathPrefix, limit, startItem); + if (!showDeleted) { + res.getOpenKeys().removeIf(o -> o.getKeyInfo().getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY)); + } + if (!showOverwritten) { + res.getOpenKeys().removeIf(o -> o.getKeyInfo().getMetadata().containsKey(OzoneConsts.OVERWRITTEN_HSYNC_KEY)); + } if (json) { // Print detailed JSON printOpenKeysListAsJson(res); @@ -132,13 +149,16 @@ private void printOpenKeysList(ListOpenFilesResult res) { if (startItem != null && !startItem.isEmpty()) { msg += "\nafter continuation token:\n " + startItem; } - msg += "\n\nClient ID\t\tCreation time\tHsync'ed\tOpen File Path"; + msg += "\n\nClient ID\t\t\tCreation time\t\tHsync'ed\t"; + msg += showDeleted ? "Deleted\t" : ""; + msg += showOverwritten ? "Overwritten\t" : ""; + msg += "Open File Path"; System.out.println(msg); for (OpenKeySession e : openFileList) { long clientId = e.getId(); OmKeyInfo omKeyInfo = e.getKeyInfo(); - String line = clientId + "\t" + omKeyInfo.getCreationTime() + "\t"; + String line = clientId + "\t" + Instant.ofEpochMilli(omKeyInfo.getCreationTime()) + "\t"; if (omKeyInfo.isHsync()) { String hsyncClientIdStr = @@ -151,8 +171,24 @@ private void printOpenKeysList(ListOpenFilesResult res) { // initially opens the file (!) line += "Yes w/ cid " + hsyncClientIdStr + "\t"; } + + if (showDeleted) { + if (omKeyInfo.getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY)) { + line += "Yes\t\t"; + } else { + line += "No\t\t"; + } + } + if (showOverwritten) { + if (omKeyInfo.getMetadata().containsKey(OzoneConsts.OVERWRITTEN_HSYNC_KEY)) { + line += "Yes\t"; + } else { + line += "No\t"; + } + } } else { - line += "No\t\t"; + line += showDeleted ? "No\t\tNo\t\t" : "No\t\t"; + line += showOverwritten ? 
"No\t" : ""; } line += getFullPathFromKeyInfo(omKeyInfo); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java index 99af758b5bad..0a2666d30ee2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.admin.reconfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine; import java.util.List; @@ -44,10 +45,10 @@ public Void call() throws Exception { " --in-service-datanodes is not given."); return null; } - executeCommand(parent.getAddress()); + executeCommand(parent.getService(), parent.getAddress()); } return null; } - protected abstract void executeCommand(String address); + protected abstract void executeCommand(HddsProtos.NodeType nodeType, String address); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java index 0c25b1f67b3b..fc171e52d8d3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.kohsuke.MetaInfServices; @@ -56,6 +57,11 @@ public class ReconfigureCommands implements Callable, @Spec private CommandSpec spec; + @CommandLine.Option(names = {"--service"}, + description = "service: OM, SCM, DATANODE.", + required = true) + private String service; + @CommandLine.Option(names = {"--address"}, description = "node address: or .", required = false) @@ -77,6 +83,10 @@ public String getAddress() { return address; } + public HddsProtos.NodeType getService() { + return HddsProtos.NodeType.valueOf(service); + } + @Override public Class getParentType() { return OzoneAdmin.class; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java index 60bc9c2ef557..99450715ac98 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; import java.io.IOException; @@ -36,9 +37,9 @@ public class ReconfigurePropertiesSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol 
reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); List properties = reconfigProxy.listReconfigureProperties(); System.out.printf("%s: Node [%s] Reconfigurable properties:%n", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java index 86d95bf06457..ae2e5a1a7432 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; import java.io.IOException; @@ -34,9 +35,9 @@ public class ReconfigureStartSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); reconfigProxy.startReconfigure(); System.out.printf("%s: Started reconfiguration task on node [%s].%n", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java index 20e0ee8281cf..07bd2d6f4ac6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.ReconfigurationUtil; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; import java.io.IOException; @@ -40,9 +41,9 @@ public class ReconfigureStatusSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); ReconfigurationTaskStatus status = reconfigProxy.getReconfigureStatus(); System.out.printf("%s: Reconfiguring status for node [%s]: ", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java index e7e1860c2cb7..b24190dceacd 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java @@ -34,7 +34,7 @@ import java.util.concurrent.ExecutorService; import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; @@ -47,23 +47,23 @@ private ReconfigureSubCommandUtil() { } public static ReconfigureProtocol getSingleNodeReconfigureProxy( - String address) throws IOException { + HddsProtos.NodeType nodeType, String address) throws IOException { OzoneConfiguration ozoneConf = new OzoneConfiguration(); UserGroupInformation user = UserGroupInformation.getCurrentUser(); InetSocketAddress nodeAddr = NetUtils.createSocketAddr(address); - return new ReconfigureProtocolClientSideTranslatorPB( + return new ReconfigureProtocolClientSideTranslatorPB(nodeType, nodeAddr, user, ozoneConf); } public static void parallelExecute(ExecutorService executorService, - List nodes, Consumer operation) { + List nodes, BiConsumer operation) { AtomicInteger successCount = new AtomicInteger(); AtomicInteger failCount = new AtomicInteger(); if (nodes != null) { for (T node : nodes) { executorService.submit(() -> { try { - operation.accept(node); + operation.accept(HddsProtos.NodeType.DATANODE, node); successCount.incrementAndGet(); } catch (Exception e) { failCount.incrementAndGet(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java index 47a0ec2299c2..8123d5358a46 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java @@ -16,13 +16,11 @@ */ package org.apache.hadoop.ozone.admin.scm; -import com.google.gson.Gson; -import com.google.gson.JsonIOException; -import com.google.gson.JsonSyntaxException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.common.helpers.DeletedBlocksTransactionInfoWrapper; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; import java.io.FileInputStream; @@ -74,12 +72,11 @@ public void execute(ScmClient client) throws IOException { if (group.resetAll) { count = client.resetDeletedBlockRetryCount(new ArrayList<>()); } else if (group.fileName != null) { - Gson gson = new Gson(); List txIDs; try (InputStream in = new FileInputStream(group.fileName); Reader fileReader = new InputStreamReader(in, StandardCharsets.UTF_8)) { - DeletedBlocksTransactionInfoWrapper[] txns = gson.fromJson(fileReader, + DeletedBlocksTransactionInfoWrapper[] txns = JsonUtils.readFromReader(fileReader, DeletedBlocksTransactionInfoWrapper[].class); txIDs = Arrays.stream(txns) .map(DeletedBlocksTransactionInfoWrapper::getTxID) @@ -92,10 +89,12 @@ public void execute(ScmClient client) throws IOException { System.out.println("The last loaded txID: " + txIDs.get(txIDs.size() - 1)); } - } catch (JsonIOException | JsonSyntaxException | IOException ex) { - System.out.println("Cannot parse the file " + group.fileName); - throw new IOException(ex); + } catch (IOException ex) { + final String message = "Failed to parse the file " + group.fileName + ": " + ex.getMessage(); + System.out.println(message); + throw new IOException(message, ex); } + count = 
client.resetDeletedBlockRetryCount(txIDs); } else { if (group.txList == null || group.txList.isEmpty()) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java index e8ced23b348f..f66f4f3abda2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java @@ -33,8 +33,7 @@ */ @Command(name = "ozonemanagers", aliases = {"-ozonemanagers"}, - description = "gets list of ozone storage container " - + "manager nodes in the cluster", + description = "gets list of Ozone Manager nodes in the cluster", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) public class OzoneManagersCommandHandler implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index b71dd1c01566..5c311d49c93f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -24,11 +24,9 @@ import java.util.List; import java.util.Map; import java.util.HashSet; -import com.google.gson.GsonBuilder; -import com.google.gson.Gson; -import com.google.gson.JsonObject; -import com.google.gson.JsonArray; -import com.google.gson.JsonElement; + +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -40,10 +38,10 @@ import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -81,13 +79,12 @@ protected void execute(OzoneClient client, OzoneAddress address) XceiverClientManager xceiverClientManager = containerOperationClient.getXceiverClientManager()) { OzoneManagerProtocol ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient(); address.ensureKeyAddress(); - JsonElement element; - JsonObject result = new JsonObject(); + ObjectNode result = JsonUtils.createObjectNode(null); String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); String keyName = address.getKeyName(); - List tempchunks = null; - List chunkDetailsList = new ArrayList(); + List tempchunks; + List chunkDetailsList = new ArrayList<>(); HashSet chunkPaths = new HashSet<>(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setKeyName(keyName).build(); @@ -103,7 +100,7 @@ protected void execute(OzoneClient client, OzoneAddress address) } ContainerLayoutVersion containerLayoutVersion = 
ContainerLayoutVersion .getConfiguredVersion(getConf()); - JsonArray responseArrayList = new JsonArray(); + ArrayNode responseArrayList = JsonUtils.createArrayNode(); for (OmKeyLocationInfo keyLocation : locationInfos) { ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo(); ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo(); @@ -129,24 +126,17 @@ protected void execute(OzoneClient client, OzoneAddress address) keyLocation.getBlockID().getDatanodeBlockIDProtobuf(); // doing a getBlock on all nodes Map - responses = null; - Map - readContainerResponses = null; - try { - responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, - datanodeBlockID, keyLocation.getToken()); - readContainerResponses = - containerOperationClient.readContainerFromAllNodes( - keyLocation.getContainerID(), pipeline); - } catch (InterruptedException e) { - LOG.error("Execution interrupted due to " + e); - Thread.currentThread().interrupt(); - } - JsonArray responseFromAllNodes = new JsonArray(); - for (Map.Entry - entry : responses.entrySet()) { + responses = + ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, + keyLocation.getBlockID().getDatanodeBlockIDProtobuf(), + keyLocation.getToken()); + Map readContainerResponses = + containerOperationClient.readContainerFromAllNodes( + keyLocation.getContainerID(), pipeline); + ArrayNode responseFromAllNodes = JsonUtils.createArrayNode(); + for (Map.Entry entry : responses.entrySet()) { chunkPaths.clear(); - JsonObject jsonObj = new JsonObject(); + ObjectNode jsonObj = JsonUtils.createObjectNode(null); if (entry.getValue() == null) { LOG.error("Cant execute getBlock on this node"); continue; @@ -158,7 +148,7 @@ protected void execute(OzoneClient client, OzoneAddress address) String fileName = containerLayoutVersion.getChunkFile(new File( getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), - ChunkInfo.getFromProtoBuf(chunkInfo)).toString(); + chunkInfo.getChunkName()).toString(); chunkPaths.add(fileName); ChunkDetails chunkDetails = new ChunkDetails(); chunkDetails.setChunkName(fileName); @@ -178,29 +168,29 @@ protected void execute(OzoneClient client, OzoneAddress address) containerChunkInfoVerbose.setChunkType(blockChunksType); containerChunkInfo.setChunkType(blockChunksType); } - Gson gson = new GsonBuilder().create(); + if (isVerbose()) { - element = gson.toJsonTree(containerChunkInfoVerbose); + jsonObj.set("Locations", + JsonUtils.createObjectNode(containerChunkInfoVerbose)); } else { - element = gson.toJsonTree(containerChunkInfo); + jsonObj.set("Locations", + JsonUtils.createObjectNode(containerChunkInfo)); } - jsonObj.addProperty("Datanode-HostName", entry.getKey() - .getHostName()); - jsonObj.addProperty("Datanode-IP", entry.getKey() - .getIpAddress()); - jsonObj.addProperty("Container-ID", containerId); - jsonObj.addProperty("Block-ID", keyLocation.getLocalID()); - jsonObj.add("Locations", element); + jsonObj.put("Datanode-HostName", entry.getKey().getHostName()); + jsonObj.put("Datanode-IP", entry.getKey().getIpAddress()); + jsonObj.put("Container-ID", containerId); + jsonObj.put("Block-ID", keyLocation.getLocalID()); responseFromAllNodes.add(jsonObj); } responseArrayList.add(responseFromAllNodes); + } catch (InterruptedException e) { + throw new RuntimeException(e); } finally { xceiverClientManager.releaseClientForReadData(xceiverClient, false); } } - result.add("KeyLocations", responseArrayList); - Gson gson2 = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = 
gson2.toJson(result); + result.set("KeyLocations", responseArrayList); + String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result); System.out.println(prettyJson); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java index f88e08413d4b..130c1bca0fc8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java @@ -32,6 +32,7 @@ public class ContainerChunkInfo { private String containerPath; private List chunkInfos; + private HashSet files; private UUID pipelineID; private Pipeline pipeline; @@ -65,6 +66,27 @@ public void setChunkType(ChunkType chunkType) { this.chunkType = chunkType; } + public String getContainerPath() { + return containerPath; + } + + public List getChunkInfos() { + return chunkInfos; + } + + public HashSet getFiles() { + return files; + } + + public UUID getPipelineID() { + return pipelineID; + } + + public ChunkType getChunkType() { + return chunkType; + } + + @Override public String toString() { return "Container{" diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java new file mode 100644 index 000000000000..0c7ba187ce18 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java @@ -0,0 +1,277 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.debug; + +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.XceiverClientFactory; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.cli.ScmOption; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.utils.HAUtils; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneKeyLocation; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.rpc.RpcClient; +import org.apache.hadoop.ozone.shell.Handler; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import org.apache.hadoop.ozone.shell.Shell; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.StringUtils; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static java.util.Collections.emptySet; +import static java.util.Comparator.comparing; + +/** + * Find EC keys affected by missing padding blocks (HDDS-10681). + */ +@CommandLine.Command(name = "find-missing-padding", + aliases = { "fmp" }, + description = "List all keys with any missing padding, optionally limited to a volume/bucket/key URI.") +@MetaInfServices(SubcommandWithParent.class) +public class FindMissingPadding extends Handler implements SubcommandWithParent { + + @CommandLine.ParentCommand + private OzoneDebug parent; + + @CommandLine.Mixin + private ScmOption scmOption; + + @CommandLine.Parameters(arity = "0..1", + description = Shell.OZONE_URI_DESCRIPTION) + private String uri; + + /** + * Keys possibly affected (those with any block under threshold size), + * grouped by container ID and block (local) ID. 
+ */ + private final Map>> candidateKeys = new HashMap<>(); + + private final Set affectedKeys = new HashSet<>(); + + @Override + protected OzoneAddress getAddress() throws OzoneClientException { + return new OzoneAddress(uri); + } + + @Override + public Class getParentType() { + return OzoneDebug.class; + } + + @Override + protected void execute(OzoneClient ozoneClient, OzoneAddress address) throws IOException { + findCandidateKeys(ozoneClient, address); + checkContainers(ozoneClient); + handleAffectedKeys(); + } + + private void findCandidateKeys(OzoneClient ozoneClient, OzoneAddress address) throws IOException { + ObjectStore objectStore = ozoneClient.getObjectStore(); + ClientProtocol rpcClient = objectStore.getClientProxy(); + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + String keyName = address.getKeyName(); + if (!keyName.isEmpty()) { + checkKey(rpcClient, volumeName, bucketName, keyName); + } else if (!bucketName.isEmpty()) { + OzoneVolume volume = objectStore.getVolume(volumeName); + OzoneBucket bucket = volume.getBucket(bucketName); + checkBucket(bucket, rpcClient); + } else if (!volumeName.isEmpty()) { + OzoneVolume volume = objectStore.getVolume(volumeName); + checkVolume(volume, rpcClient); + } else { + for (Iterator it = objectStore.listVolumes(null); it.hasNext();) { + checkVolume(it.next(), rpcClient); + } + } + } + + private void checkVolume(OzoneVolume volume, ClientProtocol rpcClient) throws IOException { + for (Iterator it = volume.listBuckets(null); it.hasNext();) { + OzoneBucket bucket = it.next(); + checkBucket(bucket, rpcClient); + } + } + + private void checkBucket(OzoneBucket bucket, ClientProtocol rpcClient) throws IOException { + String volumeName = bucket.getVolumeName(); + String bucketName = bucket.getName(); + for (Iterator it = bucket.listKeys(null); it.hasNext();) { + OzoneKey key = it.next(); + if (isEC(key)) { + checkKey(rpcClient, volumeName, bucketName, key.getName()); + } else { + LOG.trace("Key {}/{}/{} is not EC", volumeName, bucketName, key.getName()); + } + } + } + + private void checkKey(ClientProtocol rpcClient, String volumeName, String bucketName, String keyName) + throws IOException { + OzoneKeyDetails keyDetails = rpcClient.getKeyDetails(volumeName, bucketName, keyName); + if (isEC(keyDetails)) { + checkECKey(keyDetails); + } + } + + private void checkECKey(OzoneKeyDetails keyDetails) { + List locations = keyDetails.getOzoneKeyLocations(); + if (!locations.isEmpty()) { + ECReplicationConfig ecConfig = (ECReplicationConfig) keyDetails.getReplicationConfig(); + long sizeThreshold = (ecConfig.getData() - 1) * (long) ecConfig.getEcChunkSize(); + for (OzoneKeyLocation loc : locations) { + long size = loc.getLength(); + if (size <= sizeThreshold) { + candidateKeys.computeIfAbsent(loc.getContainerID(), k -> new HashMap<>()) + .computeIfAbsent(loc.getLocalID(), k -> new HashSet<>()) + .add(keyDetails); + } + } + } else { + LOG.trace("Key {}/{}/{} has no locations", + keyDetails.getVolumeName(), keyDetails.getBucketName(), keyDetails.getName()); + } + } + + private static boolean isEC(OzoneKey key) { + return key.getReplicationConfig().getReplicationType() == HddsProtos.ReplicationType.EC; + } + + private void checkContainers(OzoneClient ozoneClient) throws IOException { + if (candidateKeys.isEmpty()) { + return; + } + + SecurityConfig securityConfig = new SecurityConfig(getConf()); + final boolean tokenEnabled = securityConfig.isSecurityEnabled() && securityConfig.isContainerTokenEnabled(); + 
StorageContainerLocationProtocol scmContainerClient = HAUtils.getScmContainerClient(getConf()); + RpcClient rpcClient = (RpcClient) ozoneClient.getProxy(); + XceiverClientFactory xceiverClientManager = rpcClient.getXceiverClientManager(); + Pipeline.Builder pipelineBuilder = Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setState(Pipeline.PipelineState.OPEN) + .setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); + + try (ScmClient scmClient = scmOption.createScmClient()) { + for (Map.Entry>> entry : candidateKeys.entrySet()) { + long containerID = entry.getKey(); + Map> blockToKeysMap = entry.getValue(); + + ContainerInfo container = scmClient.getContainer(containerID); + if (container.getState() != HddsProtos.LifeCycleState.CLOSED) { + LOG.trace("Skip container {} as it is not CLOSED, rather {}", containerID, container.getState()); + continue; + } + + Token token = tokenEnabled + ? scmContainerClient.getContainerToken(ContainerID.valueOf(containerID)) + : null; + + List containerReplicas = scmClient.getContainerReplicas(containerID); + + LOG.debug("Container {} replicas: {}", containerID, containerReplicas.stream() + .sorted(comparing(ContainerReplicaInfo::getReplicaIndex) + .thenComparing(ContainerReplicaInfo::getState) + .thenComparing(r -> r.getDatanodeDetails().getUuidString())) + .map(r -> "index=" + r.getReplicaIndex() + " keys=" + r.getKeyCount() + + " state=" + r.getState() + " dn=" + r.getDatanodeDetails()) + .collect(Collectors.joining(", ")) + ); + + for (ContainerReplicaInfo replica : containerReplicas) { + if (!HddsProtos.LifeCycleState.CLOSED.name().equals(replica.getState())) { + LOG.trace("Ignore container {} replica {} at {} in {} state", + replica.getContainerID(), replica.getReplicaIndex(), replica.getDatanodeDetails(), replica.getState()); + continue; + } + + final Set missingBlocks = new HashSet<>(blockToKeysMap.keySet()); + Pipeline pipeline = pipelineBuilder + .setNodes(Collections.singletonList(replica.getDatanodeDetails())) + .build(); + XceiverClientSpi datanodeClient = xceiverClientManager.acquireClientForReadData(pipeline); + try { + ContainerProtos.ListBlockResponseProto listBlockResponse = ContainerProtocolCalls.listBlock( + datanodeClient, containerID, null, Integer.MAX_VALUE, token); + for (ContainerProtos.BlockData blockData : listBlockResponse.getBlockDataList()) { + missingBlocks.remove(blockData.getBlockID().getLocalID()); + } + if (missingBlocks.isEmpty()) { + LOG.debug("All {} blocks in container {} found on replica {} at {}", + blockToKeysMap.keySet().size(), containerID, replica.getReplicaIndex(), replica.getDatanodeDetails()); + } else { + LOG.info("Found {} blocks missing from container {} on replica {} at {}", + missingBlocks.size(), containerID, replica.getReplicaIndex(), replica.getDatanodeDetails()); + missingBlocks.forEach(b -> affectedKeys.addAll(blockToKeysMap.getOrDefault(b, emptySet()))); + } + } finally { + xceiverClientManager.releaseClientForReadData(datanodeClient, false); + } + } + } + } + } + + private void handleAffectedKeys() { + if (!affectedKeys.isEmpty()) { + out().println(StringUtils.join("\t", Arrays.asList( + "Key", "Size", "Replication" + ))); + for (OzoneKey key : affectedKeys) { + out().println(StringUtils.join("\t", Arrays.asList( + key.getVolumeName() + "/" + key.getBucketName() + "/" + key.getName(), + key.getDataSize(), + key.getReplicationConfig().getReplication() + ))); + } + } + } +} diff --git 
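FindMissingPadding above treats an EC block group as a candidate when its length is at most (data − 1) × ecChunkSize, since only groups that short can contain stripes consisting purely of padding on some replicas. A small worked example of that threshold check follows; the replication parameters (rs-3-2-1024k) are illustrative assumptions, not values taken from the patch:

/** Worked example of the candidate-size filter used in checkECKey() above. */
public final class PaddingThresholdExample {

  private PaddingThresholdExample() {
  }

  /** A block group this short may have padding-only chunks on some replicas. */
  static boolean isCandidate(long blockGroupLength, int dataBlocks, int ecChunkSize) {
    long sizeThreshold = (dataBlocks - 1) * (long) ecChunkSize;
    return blockGroupLength <= sizeThreshold;
  }

  public static void main(String[] args) {
    int dataBlocks = 3;               // rs-3-2 (assumed for illustration)
    int ecChunkSize = 1024 * 1024;    // 1 MiB chunks (assumed for illustration)
    // Threshold is (3 - 1) * 1 MiB = 2 MiB: a 1.5 MiB block group is flagged,
    // a 4 MiB one is not, matching the size comparison in checkECKey().
    System.out.println(isCandidate(1_572_864L, dataBlocks, ecChunkSize)); // true
    System.out.println(isCandidate(4_194_304L, dataBlocks, ecChunkSize)); // false
  }
}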
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java index 30f2b4eca1fd..48ed7c74ae7d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -17,14 +17,11 @@ package org.apache.hadoop.ozone.debug; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; @@ -36,6 +33,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.keys.KeyHandler; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import jakarta.annotation.Nonnull; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -129,18 +128,17 @@ protected void execute(OzoneClient client, OzoneAddress address) replicasWithoutChecksum = noChecksumClient .getKeysEveryReplicas(volumeName, bucketName, keyName); - JsonObject result = new JsonObject(); - result.addProperty(JSON_PROPERTY_FILE_NAME, + ObjectNode result = JsonUtils.createObjectNode(null); + result.put(JSON_PROPERTY_FILE_NAME, volumeName + "/" + bucketName + "/" + keyName); - result.addProperty(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize()); + result.put(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize()); - JsonArray blocks = new JsonArray(); + ArrayNode blocks = JsonUtils.createArrayNode(); downloadReplicasAndCreateManifest(keyName, replicas, replicasWithoutChecksum, dir, blocks); - result.add(JSON_PROPERTY_FILE_BLOCKS, blocks); + result.set(JSON_PROPERTY_FILE_BLOCKS, blocks); - Gson gson = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = gson.toJson(result); + String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result); String manifestFileName = keyName + "_manifest"; System.out.println("Writing manifest file : " + manifestFileName); @@ -158,25 +156,22 @@ private void downloadReplicasAndCreateManifest( Map> replicas, Map> replicasWithoutChecksum, - File dir, JsonArray blocks) throws IOException { + File dir, ArrayNode blocks) throws IOException { int blockIndex = 0; for (Map.Entry> block : replicas.entrySet()) { - JsonObject blockJson = new JsonObject(); - JsonArray replicasJson = new JsonArray(); + ObjectNode blockJson = JsonUtils.createObjectNode(null); + ArrayNode replicasJson = JsonUtils.createArrayNode(); blockIndex += 1; - blockJson.addProperty(JSON_PROPERTY_BLOCK_INDEX, blockIndex); + blockJson.put(JSON_PROPERTY_BLOCK_INDEX, blockIndex); OmKeyLocationInfo locationInfo = block.getKey(); - blockJson.addProperty(JSON_PROPERTY_BLOCK_CONTAINERID, + blockJson.put(JSON_PROPERTY_BLOCK_CONTAINERID, locationInfo.getContainerID()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_LOCALID, - locationInfo.getLocalID()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_LENGTH, - locationInfo.getLength()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_OFFSET, - 
locationInfo.getOffset()); + blockJson.put(JSON_PROPERTY_BLOCK_LOCALID, locationInfo.getLocalID()); + blockJson.put(JSON_PROPERTY_BLOCK_LENGTH, locationInfo.getLength()); + blockJson.put(JSON_PROPERTY_BLOCK_OFFSET, locationInfo.getOffset()); BlockID blockID = locationInfo.getBlockID(); Map blockReplicasWithoutChecksum = @@ -186,12 +181,10 @@ private void downloadReplicasAndCreateManifest( replica : block.getValue().entrySet()) { DatanodeDetails datanode = replica.getKey(); - JsonObject replicaJson = new JsonObject(); + ObjectNode replicaJson = JsonUtils.createObjectNode(null); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_HOSTNAME, - datanode.getHostName()); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_UUID, - datanode.getUuidString()); + replicaJson.put(JSON_PROPERTY_REPLICA_HOSTNAME, datanode.getHostName()); + replicaJson.put(JSON_PROPERTY_REPLICA_UUID, datanode.getUuidString()); String fileName = keyName + "_block" + blockIndex + "_" + datanode.getHostName(); @@ -202,8 +195,7 @@ private void downloadReplicasAndCreateManifest( Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING); } catch (IOException e) { Throwable cause = e.getCause(); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, - e.getMessage()); + replicaJson.put(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage()); if (cause instanceof OzoneChecksumException) { try (InputStream is = getReplica( blockReplicasWithoutChecksum, datanode)) { @@ -213,11 +205,10 @@ private void downloadReplicasAndCreateManifest( } replicasJson.add(replicaJson); } - blockJson.add(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson); + blockJson.set(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson); blocks.add(blockJson); - blockReplicasWithoutChecksum.values() - .forEach(each -> IOUtils.close(LOG, each)); + IOUtils.close(LOG, blockReplicasWithoutChecksum.values()); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 8afe064299f2..20acad0562ab 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -265,7 +265,7 @@ private void shutdown() { */ private void reportAnyFailure() { if (failureCounter.get() > 0) { - throw new RuntimeException("One ore more freon test is failed."); + throw new RuntimeException("One or more freon test is failed."); } } @@ -285,7 +285,7 @@ public void init() { attemptCounter = new AtomicLong(0); if (prefix.length() == 0) { - prefix = RandomStringUtils.randomAlphanumeric(10).toLowerCase(); + prefix = !allowEmptyPrefix() ? RandomStringUtils.randomAlphanumeric(10).toLowerCase() : ""; } else { //replace environment variables to support multi-node execution prefix = resolvePrefix(prefix); @@ -306,8 +306,8 @@ public void init() { "Invalid command, " + "the testNo must be a positive integer"); } - LOG.info("Executing test with prefix {} " + - "and number-of-tests {}", prefix, testNo); + LOG.info("Executing test with prefix {} and number-of-tests {}", + prefix.isEmpty() ? "''" : prefix, testNo); pathSchema = new PathSchema(prefix); @@ -541,6 +541,15 @@ public static byte[] getDigest(InputStream stream) throws IOException { return dig.digest(stream); } + /** + * When no prefix is specified, + * if allowEmptyPrefix is false, a random prefix will be used; + * if allowEmptyPrefix is true, an empty prefix will be used. 
+ */ + public boolean allowEmptyPrefix() { + return false; + } + public String getPrefix() { return prefix; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java new file mode 100644 index 000000000000..3b4d25cddaf7 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.freon; + +import com.codahale.metrics.Timer; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.XceiverClientFactory; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; +import org.apache.hadoop.hdds.utils.HAUtils; +import org.apache.hadoop.ozone.OzoneSecurityUtil; +import org.apache.hadoop.ozone.util.PayloadUtils; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +import static org.apache.hadoop.hdds.client.ReplicationConfig.getLegacyFactor; + +/** + * Utility to generate RPC request to DN. + */ +@Command(name = "dn-echo", + aliases = "dne", + description = + "Generate echo RPC request to DataNode", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true, + showDefaultValues = true) +public class DNRPCLoadGenerator extends BaseFreonGenerator + implements Callable { + private static final Logger LOG = + LoggerFactory.getLogger(DNRPCLoadGenerator.class); + private static final int RPC_PAYLOAD_MULTIPLICATION_FACTOR = 1024; + private static final int MAX_SIZE_KB = 2097151; + private Timer timer; + private OzoneConfiguration configuration; + private ByteString payloadReqBytes; + private int payloadRespSize; + private List clients; + private String encodedContainerToken; + @Option(names = {"--payload-req"}, + description = + "Specifies the size of payload in KB in RPC request. ", + defaultValue = "0") + private int payloadReqSizeKB = 0; + + @Option(names = {"--payload-resp"}, + description = + "Specifies the size of payload in KB in RPC response. 
", + defaultValue = "0") + private int payloadRespSizeKB = 0; + + @Option(names = {"--container-id"}, + description = "Send echo to DataNodes associated with this container") + private long containerID; + + @Option(names = {"--sleep-time-ms"}, + description = "Let DataNode to pause for a duration (in milliseconds) for each request", + defaultValue = "0") + private int sleepTimeMs = 0; + + @Option(names = {"--clients"}, + description = "number of xceiver clients", + defaultValue = "1") + private int numClients = 1; + + @Option(names = {"--read-only"}, + description = "if Ratis, read only or not", + defaultValue = "false") + private boolean readOnly = false; + + @Option(names = {"--ratis"}, + description = "if Ratis or grpc", + defaultValue = "false") + private boolean ratis = false; + + @CommandLine.ParentCommand + private Freon freon; + + // empy constructor for picocli + DNRPCLoadGenerator() { + } + + @VisibleForTesting + DNRPCLoadGenerator(OzoneConfiguration ozoneConfiguration) { + this.configuration = ozoneConfiguration; + } + + @Override + public Void call() throws Exception { + Preconditions.checkArgument(payloadReqSizeKB >= 0, + "OM echo request payload size should be positive value or zero."); + Preconditions.checkArgument(payloadRespSizeKB >= 0, + "OM echo response payload size should be positive value or zero."); + + if (configuration == null) { + configuration = freon.createOzoneConfiguration(); + } + ContainerOperationClient scmClient = new ContainerOperationClient(configuration); + ContainerInfo containerInfo = scmClient.getContainer(containerID); + + List pipelineList = scmClient.listPipelines(); + Pipeline pipeline = pipelineList.stream() + .filter(p -> p.getId().equals(containerInfo.getPipelineID())) + .findFirst() + .orElse(null); + // If GRPC, use STANDALONE pipeline + if (!ratis) { + if (!readOnly) { + LOG.warn("Read only is not set to true for GRPC, setting it to true"); + readOnly = true; + } + pipeline = Pipeline.newBuilder(pipeline) + .setReplicationConfig(StandaloneReplicationConfig.getInstance( + getLegacyFactor(pipeline.getReplicationConfig()))) + .build(); + } + encodedContainerToken = scmClient.getEncodedContainerToken(containerID); + XceiverClientFactory xceiverClientManager; + if (OzoneSecurityUtil.isSecurityEnabled(configuration)) { + CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, configuration); + xceiverClientManager = new XceiverClientManager(configuration, + configuration.getObject(XceiverClientManager.ScmClientConfig.class), + new ClientTrustManager(caCerts, null)); + } else { + xceiverClientManager = new XceiverClientManager(configuration); + } + clients = new ArrayList<>(numClients); + for (int i = 0; i < numClients; i++) { + clients.add(xceiverClientManager.acquireClient(pipeline)); + } + + init(); + payloadReqBytes = PayloadUtils.generatePayloadProto3(payloadSizeInBytes(payloadReqSizeKB)); + payloadRespSize = calculateMaxPayloadSize(payloadRespSizeKB); + timer = getMetrics().timer("rpc-payload"); + try { + runTests(this::sendRPCReq); + } finally { + for (XceiverClientSpi client : clients) { + xceiverClientManager.releaseClient(client, false); + } + xceiverClientManager.close(); + scmClient.close(); + } + return null; + } + + private int calculateMaxPayloadSize(int payloadSizeKB) { + if (payloadSizeKB > 0) { + return Math.min( + Math.toIntExact((long)payloadSizeKB * + RPC_PAYLOAD_MULTIPLICATION_FACTOR), + MAX_SIZE_KB); + } + return 0; + } + + private int payloadSizeInBytes(int payloadSizeKB) { + return payloadSizeKB > 0 ? 
payloadSizeKB * 1024 : 0; + } + + private void sendRPCReq(long l) throws Exception { + timer.time(() -> { + int clientIndex = (numClients == 1) ? 0 : (int)l % numClients; + ContainerProtos.EchoResponseProto response = + ContainerProtocolCalls.echo(clients.get(clientIndex), encodedContainerToken, + containerID, payloadReqBytes, payloadRespSize, sleepTimeMs, readOnly); + return null; + }); + } +} + + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java index b290da2da1f5..2bbf8b6d5b24 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java @@ -193,7 +193,7 @@ private ChecksumData computeChecksum(ContainerCommandResponseProto response) throws OzoneChecksumException { ContainerProtos.ReadChunkResponseProto readChunk = response.getReadChunk(); if (readChunk.hasData()) { - return checksum.computeChecksum(readChunk.getData().toByteArray()); + return checksum.computeChecksum(readChunk.getData().asReadOnlyByteBuffer()); } else { return checksum.computeChecksum( readChunk.getDataBuffers().getBuffersList()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java index 20800757b1aa..2b178ac0aec2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.freon; -import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.DatanodeVersion; @@ -43,6 +42,7 @@ import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; @@ -186,7 +186,7 @@ public Void call() throws Exception { } catch (InterruptedException e) { throw new RuntimeException(e); } - scmClients.values().forEach(IOUtils::closeQuietly); + IOUtils.closeQuietly(scmClients.values()); IOUtils.closeQuietly(reconClient); LOGGER.info("Successfully closed all the used resources"); saveDatanodesToFile(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index bd5510695fa1..eba5446e339c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -73,7 +73,9 @@ OzoneClientKeyReadWriteListOps.class, RangeKeysGenerator.class, DatanodeSimulator.class, - OmMetadataGenerator.class + OmMetadataGenerator.class, + DNRPCLoadGenerator.class, + MultiSyncer.class }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/MultiSyncer.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/MultiSyncer.java new file mode 100644 index 
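For reference, the payload sizing used by the new dn-echo generator, extracted as a standalone sketch: the request size is a plain KB-to-bytes conversion, while the response size keeps the capped helper that the OM echo generator drops in this patch. Names and constants mirror the diff; the wrapper class exists only so the snippet compiles on its own.

public final class EchoPayloadSizing {
  private static final int RPC_PAYLOAD_MULTIPLICATION_FACTOR = 1024;
  private static final int MAX_SIZE_KB = 2097151; // 2^21 - 1, carried over from the OM-side helper

  // Request payload: simple KB-to-bytes conversion; 0 disables the payload.
  static int payloadSizeInBytes(int payloadSizeKB) {
    return payloadSizeKB > 0 ? payloadSizeKB * 1024 : 0;
  }

  // Response payload: same conversion, but the result is capped at MAX_SIZE_KB.
  static int calculateMaxPayloadSize(int payloadSizeKB) {
    if (payloadSizeKB > 0) {
      return Math.min(
          Math.toIntExact((long) payloadSizeKB * RPC_PAYLOAD_MULTIPLICATION_FACTOR),
          MAX_SIZE_KB);
    }
    return 0;
  }

  private EchoPayloadSizing() {
  }
}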
000000000000..968dd471e5d6 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/MultiSyncer.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.freon; + +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StreamCapabilities; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +import com.codahale.metrics.Timer; +import org.apache.hadoop.hdds.conf.StorageSize; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +/** + * Data generator tool test hsync/write synchronization performance. + */ +@Command(name = "ms", + aliases = "multi-syncer", + description = "Create a thread pool of hsync threads and a writer thread", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true, + showDefaultValues = true) +public class MultiSyncer extends BaseFreonGenerator + implements Callable { + + @CommandLine.ParentCommand + private Freon freon; + + @Option(names = {"--path"}, + description = "Hadoop FS file system path. Use full path.", + defaultValue = "o3fs://bucket1.vol1") + private String rootPath; + + @Option(names = {"-s", "--size"}, + description = "Size of the generated files. 
" + + StorageSizeConverter.STORAGE_SIZE_DESCRIPTION, + defaultValue = "10MB", + converter = StorageSizeConverter.class) + private StorageSize fileSize; + + @Option(names = {"--bytes-per-write"}, + description = "Size of each write", + defaultValue = "1024") + private int writeSize; + + @Option(names = {"--syncer-per-writer"}, + description = "Number of sycer threads associated with each writer", + defaultValue = "5") + private int syncerPerWriter; + + private ContentGenerator contentGenerator; + + private Timer timer; + + private OzoneConfiguration configuration; + private FileSystem fileSystem; + + // empy constructor for picocli + MultiSyncer() { + } + + @VisibleForTesting + MultiSyncer(OzoneConfiguration ozoneConfiguration) { + this.configuration = ozoneConfiguration; + } + + + @Override + public Void call() throws Exception { + init(); + + if (configuration == null) { + configuration = freon.createOzoneConfiguration(); + } + URI uri = URI.create(rootPath); + fileSystem = FileSystem.get(uri, configuration); + + Path file = new Path(rootPath + "/" + generateObjectName(0)); + fileSystem.mkdirs(file.getParent()); + + timer = getMetrics().timer("file-create"); + runTests(this::createFile); + + fileSystem.close(); + + return null; + } + + private void createFile(long counter) throws Exception { + Path file = new Path(rootPath + "/" + generateObjectName(counter)); + + contentGenerator = + new ContentGenerator(fileSize.toBytes(), writeSize, writeSize); + + ExecutorService executor = Executors.newFixedThreadPool(syncerPerWriter); + timer.time(() -> { + try (FSDataOutputStream output = fileSystem.create(file)) { + if (!StoreImplementationUtils.hasCapability( + output, StreamCapabilities.HSYNC)) { + throw new UnsupportedOperationException( + "Abort. The output stream of file " + file + " does not support hsync"); + } + + AtomicBoolean shutdown = new AtomicBoolean(); + startSyncer(executor, output, shutdown); + contentGenerator.write(output); + + shutdown.set(true); + executor.shutdown(); + executor.awaitTermination(10, TimeUnit.SECONDS); + } + return null; + }); + } + + void startSyncer(ExecutorService executor, FSDataOutputStream output, AtomicBoolean shutdown) { + // Create a Runnable task + Runnable task = () -> { + // Continuous task to be executed + while (!shutdown.get()) { + try { + output.hsync(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }; + + // Submit the task to the ExecutorService + for (int n = 0; n < syncerPerWriter; n++) { + executor.submit(task); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java index 210ec5ebfd53..9c98817185e3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java @@ -102,6 +102,7 @@ public Void call() throws Exception { private void createKey(long counter) throws Exception { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + String ownerName = ugi.getShortUserName(); OmKeyArgs keyArgs = new Builder() .setBucketName(bucketName) .setVolumeName(volumeName) @@ -110,6 +111,7 @@ private void createKey(long counter) throws Exception { .setLocationInfoList(new ArrayList<>()) .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), ALL, ALL)) + .setOwnerName(ownerName) .build(); timer.time(() -> { diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java index 60af701e1496..24060b0bac8f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java @@ -461,4 +461,8 @@ private Void performReadOperation(ReadOperation readOp, byte[] buffer) throws IO } } + @Override + public boolean allowEmptyPrefix() { + return true; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java index 958df4c11a14..191218529709 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java @@ -19,11 +19,12 @@ import com.codahale.metrics.Timer; import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import java.util.concurrent.Callable; + +import org.apache.hadoop.ozone.util.PayloadUtils; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -42,8 +43,6 @@ public class OmRPCLoadGenerator extends BaseFreonGenerator implements Callable { - private static final int RPC_PAYLOAD_MULTIPLICATION_FACTOR = 1024; - private static final int MAX_SIZE_KB = 2097151; private Timer timer; private OzoneConfiguration configuration; private OzoneManagerProtocolClientSideTranslatorPB[] clients; @@ -88,9 +87,8 @@ public Void call() throws Exception { } init(); - payloadReqBytes = RandomUtils.nextBytes( - calculateMaxPayloadSize(payloadReqSizeKB)); - payloadRespSize = calculateMaxPayloadSize(payloadRespSizeKB); + payloadReqBytes = PayloadUtils.generatePayload(payloadSizeInBytes(payloadReqSizeKB)); + payloadRespSize = payloadSizeInBytes(payloadRespSizeKB); timer = getMetrics().timer("rpc-payload"); try { runTests(this::sendRPCReq); @@ -104,14 +102,8 @@ public Void call() throws Exception { return null; } - private int calculateMaxPayloadSize(int payloadSizeKB) { - if (payloadSizeKB > 0) { - return Math.min( - Math.toIntExact((long)payloadSizeKB * - RPC_PAYLOAD_MULTIPLICATION_FACTOR), - MAX_SIZE_KB); - } - return 0; + private int payloadSizeInBytes(int payloadSizeKB) { + return payloadSizeKB > 0 ? 
payloadSizeKB * 1024 : 0; } private void sendRPCReq(long l) throws Exception { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java index 022eacde4af4..ba7456ef64b6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java @@ -271,4 +271,9 @@ public String getKeyName() { } return keyNameSb.toString(); } + + @Override + public boolean allowEmptyPrefix() { + return true; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java index 3a43ddd8ab09..dbca12c8b26d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java @@ -60,7 +60,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import com.codahale.metrics.Timer; @@ -111,6 +110,7 @@ public class GeneratorDatanode extends BaseGenerator { private int overlap; private ChunkManager chunkManager; + private BlockManagerImpl blockManager; private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; @@ -133,7 +133,7 @@ public Void call() throws Exception { config = createOzoneConfiguration(); - BlockManager blockManager = new BlockManagerImpl(config); + blockManager = new BlockManagerImpl(config); chunkManager = ChunkManagerFactory .createChunkManager(config, blockManager, null); @@ -286,7 +286,7 @@ public void generateData(long index) throws Exception { writtenBytes += currentChunkSize; } - BlockManagerImpl.persistPutBlock(container, blockData, config, true); + blockManager.persistPutBlock(container, blockData, true); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java index b8509d60c9cd..7390488c8158 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java @@ -156,10 +156,10 @@ private void writeOmBucketVolume() throws IOException { .setQuotaInBytes(100L) .addOzoneAcls( new OzoneAcl(IAccessAuthorizer.ACLIdentityType.WORLD, "", - IAccessAuthorizer.ACLType.ALL, ACCESS)) + ACCESS, IAccessAuthorizer.ACLType.ALL)) .addOzoneAcls( new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, getUserId(), - IAccessAuthorizer.ACLType.ALL, ACCESS) + ACCESS, IAccessAuthorizer.ACLType.ALL) ).build(); volTable.put("/" + volumeName, omVolumeArgs); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java index 
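The new allowEmptyPrefix() hook appears in three places above: the base generator consults it in init(), and OmMetadataGenerator and OzoneClientKeyReadWriteListOps override it to opt out of the random prefix. A condensed, illustrative sketch of how the pieces fit together; the tiny Generator base class is a stand-in for BaseFreonGenerator, not the real class.

import org.apache.commons.lang3.RandomStringUtils;

class PrefixHookSketch {
  static class Generator {
    protected String prefix = "";

    // Mirrors the new hook: returning false keeps the old behaviour (random prefix).
    public boolean allowEmptyPrefix() {
      return false;
    }

    void init() {
      // Condensed form of the check BaseFreonGenerator.init() now performs.
      if (prefix.isEmpty() && !allowEmptyPrefix()) {
        prefix = RandomStringUtils.randomAlphanumeric(10).toLowerCase();
      }
    }
  }

  // OmMetadataGenerator / OzoneClientKeyReadWriteListOps style override.
  static class EmptyPrefixGenerator extends Generator {
    @Override
    public boolean allowEmptyPrefix() {
      return true;
    }
  }
}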
cfdc924486ab..927e9186ff50 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java @@ -159,8 +159,7 @@ public static void generateConfigurations(String path, } OzoneConfiguration.XMLConfiguration generatedConfig = - new OzoneConfiguration.XMLConfiguration(); - generatedConfig.setProperties(requiredProperties); + new OzoneConfiguration.XMLConfiguration(requiredProperties); File output = new File(path, "ozone-site.xml"); if (output.createNewFile()) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java new file mode 100644 index 000000000000..166445228089 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import picocli.CommandLine; + +import java.nio.charset.StandardCharsets; +import java.util.Scanner; + +/** + * Ozone Repair Command line tool. + */ +@CommandLine.Command(name = "ozone repair", + description = "Operational tool to repair Ozone", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneRepair extends GenericCli { + + public static final String WARNING_SYS_USER_MESSAGE = + "ATTENTION: Running as user %s. Make sure this is the same user used to run the Ozone process." + + " Are you sure you want to continue (y/N)? "; + + + private OzoneConfiguration ozoneConf; + + public OzoneRepair() { + super(OzoneRepair.class); + } + + @VisibleForTesting + public OzoneRepair(OzoneConfiguration configuration) { + super(OzoneRepair.class); + this.ozoneConf = configuration; + } + + public OzoneConfiguration getOzoneConf() { + if (ozoneConf == null) { + ozoneConf = createOzoneConfiguration(); + } + return ozoneConf; + } + + /** + * Main for the Ozone Repair shell Command handling. 
+ * + * @param argv - System Args Strings[] + * @throws Exception + */ + public static void main(String[] argv) throws Exception { + new OzoneRepair().run(argv); + } + + @Override + public int execute(String[] argv) { + String currentUser = getSystemUserName(); + if (!("y".equalsIgnoreCase(getConsoleReadLineWithFormat(currentUser)))) { + System.out.println("Aborting command."); + return 1; + } + System.out.println("Run as user: " + currentUser); + + return super.execute(argv); + } + + public String getSystemUserName() { + return System.getProperty("user.name"); + } + + public String getConsoleReadLineWithFormat(String currentUser) { + System.err.printf(WARNING_SYS_USER_MESSAGE, currentUser); + return (new Scanner(System.in, StandardCharsets.UTF_8.name())).nextLine().trim(); + } + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java new file mode 100644 index 000000000000..0f36934ec14d --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Ozone Repair CLI for RocksDB. + */ +@CommandLine.Command(name = "ldb", + description = "Operational tool to repair RocksDB table.") +@MetaInfServices(SubcommandWithParent.class) +public class RDBRepair implements Callable, SubcommandWithParent { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Database File Path") + private String dbPath; + + public String getDbPath() { + return dbPath; + } + + @Override + public Void call() { + GenericCli.missingSubcommand(spec); + return null; + } + + @Override + public Class getParentType() { + return OzoneRepair.class; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java new file mode 100644 index 000000000000..ec5e2f8f9366 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.om; + +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.StringCodec; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.ozone.debug.RocksDBUtils; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.repair.RDBRepair; +import org.apache.hadoop.ozone.shell.bucket.BucketUri; +import org.kohsuke.MetaInfServices; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDBException; +import picocli.CommandLine; +import picocli.CommandLine.Model.CommandSpec; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE; + +/** + * Tool to repair snapshotInfoTable in case it has corrupted entries. + */ +@CommandLine.Command( + name = "snapshot", + description = "CLI to update global and path previous snapshot for a snapshot in case snapshot chain is corrupted." 
+) +@MetaInfServices(SubcommandWithParent.class) +public class SnapshotRepair implements Callable, SubcommandWithParent { + + @CommandLine.Spec + private static CommandSpec spec; + + @CommandLine.ParentCommand + private RDBRepair parent; + + @CommandLine.Mixin + private BucketUri bucketUri; + + @CommandLine.Parameters(description = "Snapshot name to update", index = "1") + private String snapshotName; + + @CommandLine.Option(names = {"--global-previous", "--gp"}, + required = true, + description = "Global previous snapshotId to set for the given snapshot") + private UUID globalPreviousSnapshotId; + + @CommandLine.Option(names = {"--path-previous", "--pp"}, + required = true, + description = "Path previous snapshotId to set for the given snapshot") + private UUID pathPreviousSnapshotId; + + @CommandLine.Option(names = {"--dry-run"}, + required = true, + description = "To dry-run the command.", defaultValue = "true") + private boolean dryRun; + + @Override + public Void call() throws Exception { + List cfHandleList = new ArrayList<>(); + List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath()); + + try (ManagedRocksDB db = ManagedRocksDB.open(parent.getDbPath(), cfDescList, cfHandleList)) { + ColumnFamilyHandle snapshotInfoCfh = getSnapshotInfoCfh(cfHandleList); + if (snapshotInfoCfh == null) { + System.err.println(SNAPSHOT_INFO_TABLE + " is not in a column family in DB for the given path."); + return null; + } + + String snapshotInfoTableKey = SnapshotInfo.getTableKey(bucketUri.getValue().getVolumeName(), + bucketUri.getValue().getBucketName(), snapshotName); + + SnapshotInfo snapshotInfo = getSnapshotInfo(db, snapshotInfoCfh, snapshotInfoTableKey); + if (snapshotInfo == null) { + System.err.println(snapshotName + " does not exist for given bucketUri: " + OM_KEY_PREFIX + + bucketUri.getValue().getVolumeName() + OM_KEY_PREFIX + bucketUri.getValue().getBucketName()); + return null; + } + + // snapshotIdSet is the set of the all existed snapshots ID to make that the provided global previous and path + // previous exist and after the update snapshot does not point to ghost snapshot. 
+ Set snapshotIdSet = getSnapshotIdSet(db, snapshotInfoCfh); + + if (Objects.equals(snapshotInfo.getSnapshotId(), globalPreviousSnapshotId)) { + System.err.println("globalPreviousSnapshotId: '" + globalPreviousSnapshotId + + "' is equal to given snapshot's ID: '" + snapshotInfo.getSnapshotId() + "'."); + return null; + } + + if (Objects.equals(snapshotInfo.getSnapshotId(), pathPreviousSnapshotId)) { + System.err.println("pathPreviousSnapshotId: '" + pathPreviousSnapshotId + + "' is equal to given snapshot's ID: '" + snapshotInfo.getSnapshotId() + "'."); + return null; + } + + if (!snapshotIdSet.contains(globalPreviousSnapshotId)) { + System.err.println("globalPreviousSnapshotId: '" + globalPreviousSnapshotId + + "' does not exist in snapshotInfoTable."); + return null; + } + + if (!snapshotIdSet.contains(pathPreviousSnapshotId)) { + System.err.println("pathPreviousSnapshotId: '" + pathPreviousSnapshotId + + "' does not exist in snapshotInfoTable."); + return null; + } + + snapshotInfo.setGlobalPreviousSnapshotId(globalPreviousSnapshotId); + snapshotInfo.setPathPreviousSnapshotId(pathPreviousSnapshotId); + + if (dryRun) { + System.out.println("SnapshotInfo would be updated to : " + snapshotInfo); + } else { + byte[] snapshotInfoBytes = SnapshotInfo.getCodec().toPersistedFormat(snapshotInfo); + db.get() + .put(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoTableKey), snapshotInfoBytes); + + System.out.println("Snapshot Info is updated to : " + + getSnapshotInfo(db, snapshotInfoCfh, snapshotInfoTableKey)); + } + } catch (RocksDBException exception) { + System.err.println("Failed to update the RocksDB for the given path: " + parent.getDbPath()); + System.err.println( + "Make sure that Ozone entity (OM, SCM or DN) is not running for the give dbPath and current host."); + System.err.println(exception); + } finally { + IOUtils.closeQuietly(cfHandleList); + } + + return null; + } + + private Set getSnapshotIdSet(ManagedRocksDB db, ColumnFamilyHandle snapshotInfoCfh) + throws IOException { + Set snapshotIdSet = new HashSet<>(); + try (ManagedRocksIterator iterator = new ManagedRocksIterator(db.get().newIterator(snapshotInfoCfh))) { + iterator.get().seekToFirst(); + + while (iterator.get().isValid()) { + SnapshotInfo snapshotInfo = SnapshotInfo.getCodec().fromPersistedFormat(iterator.get().value()); + snapshotIdSet.add(snapshotInfo.getSnapshotId()); + iterator.get().next(); + } + } + return snapshotIdSet; + } + + private ColumnFamilyHandle getSnapshotInfoCfh(List cfHandleList) throws RocksDBException { + byte[] nameBytes = SNAPSHOT_INFO_TABLE.getBytes(StandardCharsets.UTF_8); + + for (ColumnFamilyHandle cf : cfHandleList) { + if (Arrays.equals(cf.getName(), nameBytes)) { + return cf; + } + } + + return null; + } + + private SnapshotInfo getSnapshotInfo(ManagedRocksDB db, ColumnFamilyHandle snapshotInfoCfh, String snapshotInfoLKey) + throws IOException, RocksDBException { + byte[] bytes = db.get().get(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoLKey)); + return bytes != null ? 
SnapshotInfo.getCodec().fromPersistedFormat(bytes) : null; + } + + @Override + public Class getParentType() { + return RDBRepair.class; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java new file mode 100644 index 000000000000..9e2324a4a6f8 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * OM related repair tools. + */ +package org.apache.hadoop.ozone.repair.om; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java new file mode 100644 index 000000000000..bd382d04cf79 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Ozone Repair tools. 
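SnapshotRepair resolves the snapshotInfoTable column family by comparing handle names as bytes, since RocksDB hands back the handles in descriptor order rather than indexed by table name. A standalone sketch of that lookup, with an illustrative class name and error handling reduced to returning null, as in the patch:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;

final class CfLookupSketch {
  static ColumnFamilyHandle findByName(List<ColumnFamilyHandle> handles, String table)
      throws RocksDBException {
    byte[] nameBytes = table.getBytes(StandardCharsets.UTF_8);
    for (ColumnFamilyHandle cf : handles) {
      // getName() returns the column family name as raw bytes and may throw RocksDBException.
      if (Arrays.equals(cf.getName(), nameBytes)) {
        return cf;
      }
    }
    return null; // caller reports that the table is not a column family in the given DB
  }

  private CfLookupSketch() {
  }
}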
+ */ +package org.apache.hadoop.ozone.repair; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java index 68a2065f4ae0..f2fa1a8c4f3d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java @@ -39,6 +39,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_HTTP_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RPC_SCHEME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import org.apache.http.client.utils.URIBuilder; @@ -157,21 +158,23 @@ public OzoneClient createClient(MutableConfigurationSource conf) ozoneURI.getPort(), conf); } } else { // When host is not specified - - Collection omServiceIds = conf.getTrimmedStringCollection( - OZONE_OM_SERVICE_IDS_KEY); - - if (omServiceIds.size() > 1) { - throw new OzoneClientException("Service ID or host name must not" - + " be omitted when multiple ozone.om.service.ids is defined."); - } else if (omServiceIds.size() == 1) { - client = createRpcClientFromServiceId(omServiceIds.iterator().next(), - conf); + String localOmServiceId = conf.getTrimmed(OZONE_OM_INTERNAL_SERVICE_ID); + if (localOmServiceId == null) { + Collection omServiceIds = conf.getTrimmedStringCollection( + OZONE_OM_SERVICE_IDS_KEY); + if (omServiceIds.size() > 1) { + throw new OzoneClientException("Service ID or host name must not" + + " be omitted when multiple ozone.om.service.ids is defined."); + } else if (omServiceIds.size() == 1) { + client = createRpcClientFromServiceId(omServiceIds.iterator().next(), + conf); + } else { + client = createRpcClient(conf); + } } else { - client = createRpcClient(conf); + client = createRpcClientFromServiceId(localOmServiceId, conf); } } - return client; } @@ -194,7 +197,7 @@ public OzoneClient createClientForS3Commands( if (omServiceID != null) { // OM HA cluster if (OmUtils.isOmHAServiceId(conf, omServiceID)) { - return OzoneClientFactory.getRpcClient(omServiceID, conf); + return createRpcClientFromServiceId(omServiceID, conf); } else { throw new OzoneClientException("Service ID specified does not match" + " with " + OZONE_OM_SERVICE_IDS_KEY + " defined in the " + @@ -202,8 +205,12 @@ public OzoneClient createClientForS3Commands( serviceIds); } } else if (serviceIds.size() > 1) { - // If multiple om service ids are there, + // If multiple om service ids are there and default value isn't set, // throw an error "om service ID must not be omitted" + String localOmServiceId = conf.getTrimmed(OZONE_OM_INTERNAL_SERVICE_ID); + if (!localOmServiceId.isEmpty()) { + return createRpcClientFromServiceId(localOmServiceId, conf); + } throw new OzoneClientException("Service ID must not" + " be omitted when cluster has multiple OM Services." 
+ " Configured " + OZONE_OM_SERVICE_IDS_KEY + " are " @@ -211,7 +218,7 @@ public OzoneClient createClientForS3Commands( } // for non-HA cluster and HA cluster with only 1 service ID // get service ID from configurations - return OzoneClientFactory.getRpcClient(conf); + return createRpcClient(conf); } /** diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java index d1a6a4e156fd..4c795f1e82b4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java @@ -41,6 +41,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT; /** * Executes Delete Key. @@ -68,6 +70,12 @@ protected void execute(OzoneClient client, OzoneAddress address) return; } + if (bucket.getBucketLayout().isLegacy() && keyName.endsWith(OZONE_URI_DELIMITER) + && (getConf().getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT))) { + out().printf("Use FS(ofs/o3fs) interface to delete legacy bucket directory %n"); + return; + } + if (bucket.getBucketLayout().isFileSystemOptimized()) { // Handle FSO delete key which supports trash also deleteFSOKey(bucket, keyName); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java new file mode 100644 index 000000000000..63b61b1ec662 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.shell.snapshot; + +import java.io.IOException; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.shell.Handler; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import org.apache.hadoop.ozone.shell.bucket.BucketUri; +import picocli.CommandLine; + +/** + * ozone sh snapshot rename. 
+ */ +@CommandLine.Command(name = "rename", + description = "Rename a snapshot") +public class RenameSnapshotHandler extends Handler { + + @CommandLine.Mixin + private BucketUri snapshotPath; + + @CommandLine.Parameters(description = "Current snapshot name", + index = "1", arity = "1") + private String snapshotOldName; + + @CommandLine.Parameters(description = "New snapshot name", + index = "2", arity = "1") + private String snapshotNewName; + + @Override + protected OzoneAddress getAddress() { + return snapshotPath.getValue(); + } + + @Override + protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { + String volumeName = snapshotPath.getValue().getVolumeName(); + String bucketName = snapshotPath.getValue().getBucketName(); + OmUtils.validateSnapshotName(snapshotNewName); + client.getObjectStore() + .renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + if (isVerbose()) { + out().format("Renamed snapshot from'%s' to %s under '%s/%s'.%n", + snapshotOldName, snapshotNewName, volumeName, bucketName); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java index cf513b9e913f..25a3c1c66fe9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java @@ -43,7 +43,8 @@ ListSnapshotHandler.class, SnapshotDiffHandler.class, ListSnapshotDiffHandler.class, - InfoSnapshotHandler.class + InfoSnapshotHandler.class, + RenameSnapshotHandler.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java index c9b58064fb56..be8b4ceed173 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExtendedUserAccessIdInfo; @@ -71,39 +70,32 @@ protected void execute(OzoneClient client, OzoneAddress address) if (!printJson) { out().println("User '" + userPrincipal + "' is assigned to:"); accessIdInfoList.forEach(accessIdInfo -> { - // Get admin info - final String adminInfoString; - if (accessIdInfo.getIsAdmin()) { - adminInfoString = accessIdInfo.getIsDelegatedAdmin() ? - " delegated admin" : " admin"; - } else { - adminInfoString = ""; - } + final String adminInfoString = accessIdInfo.getIsAdmin() ? + (accessIdInfo.getIsDelegatedAdmin() ? 
" delegated admin" : + " admin") : ""; out().format("- Tenant '%s'%s with accessId '%s'%n", accessIdInfo.getTenantId(), adminInfoString, accessIdInfo.getAccessId()); }); } else { + ObjectNode resObj = JsonUtils.createObjectNode(null); + resObj.put("user", userPrincipal); - final JsonObject resObj = new JsonObject(); - resObj.addProperty("user", userPrincipal); - - final JsonArray arr = new JsonArray(); + ArrayNode arr = JsonUtils.createArrayNode(); accessIdInfoList.forEach(accessIdInfo -> { - final JsonObject tenantObj = new JsonObject(); - tenantObj.addProperty("accessId", accessIdInfo.getAccessId()); - tenantObj.addProperty("tenantId", accessIdInfo.getTenantId()); - tenantObj.addProperty("isAdmin", accessIdInfo.getIsAdmin()); - tenantObj.addProperty("isDelegatedAdmin", - accessIdInfo.getIsDelegatedAdmin()); + ObjectNode tenantObj = JsonUtils.createObjectNode(null); + tenantObj.put("accessId", accessIdInfo.getAccessId()); + tenantObj.put("tenantId", accessIdInfo.getTenantId()); + tenantObj.put("isAdmin", accessIdInfo.getIsAdmin()); + tenantObj.put("isDelegatedAdmin", accessIdInfo.getIsDelegatedAdmin()); arr.add(tenantObj); }); - resObj.add("tenants", arr); - - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(resObj)); + resObj.set("tenants", arr); + String prettyJson = + JsonUtils.toJsonStringWithDefaultPrettyPrinter(resObj); + out().println(prettyJson); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java index 364fd21233b0..041b559608e2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine; @@ -55,14 +54,14 @@ protected void execute(OzoneClient client, OzoneAddress address) client.getObjectStore().tenantAssignAdmin(accessId, tenantId, delegated); if (isVerbose()) { - final JsonObject obj = new JsonObject(); - obj.addProperty("accessId", accessId); - obj.addProperty("tenantId", tenantId); - obj.addProperty("isAdmin", true); - obj.addProperty("isDelegatedAdmin", delegated); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(obj)); - } + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("accessId", accessId); + obj.put("tenantId", tenantId); + obj.put("isAdmin", true); + obj.put("isDelegatedAdmin", delegated); + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(obj); + out().println(jsonString); + } } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java index fd6c4109604c..1eac7685be76 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java @@ -17,9 
+17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.TenantArgs; import org.apache.hadoop.ozone.shell.OzoneAddress; @@ -58,11 +57,11 @@ protected void execute(OzoneClient client, OzoneAddress address) // RpcClient#createTenant prints INFO level log of tenant and volume name if (isVerbose()) { - final JsonObject obj = new JsonObject(); - obj.addProperty("tenantId", tenantId); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(obj)); - } + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("tenantId", tenantId); + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(obj); + out().println(jsonString); + } } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java index 9924ac827ae2..c5e43e27a42c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; +import org.apache.hadoop.hdds.server.JsonUtils; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.shell.OzoneAddress; @@ -60,13 +59,13 @@ protected void execute(OzoneClient client, OzoneAddress address) } if (isVerbose()) { - final JsonObject obj = new JsonObject(); - obj.addProperty("tenantId", tenantId); - obj.addProperty("volumeName", resp.getVolumeName()); - obj.addProperty("volumeRefCount", resp.getVolRefCount()); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("tenantId", tenantId); + obj.put("volumeName", resp.getVolumeName()); + obj.put("volumeRefCount", resp.getVolRefCount()); // Print raw response to stderr if verbose - out().println(gson.toJson(obj)); + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(obj); + out().println(jsonString); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java index 6f0428bd7b8c..3201eb456396 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; +import org.apache.hadoop.hdds.server.JsonUtils; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import 
org.apache.hadoop.ozone.shell.OzoneAddress; @@ -50,23 +49,21 @@ protected void execute(OzoneClient client, OzoneAddress address) tenantStateList.getTenantStateList().forEach(tenantState -> out().println(tenantState.getTenantId())); } else { - final JsonArray resArray = new JsonArray(); + ArrayNode resArray = JsonUtils.createArrayNode(); tenantStateList.getTenantStateList().forEach(tenantState -> { - final JsonObject obj = new JsonObject(); - obj.addProperty("tenantId", tenantState.getTenantId()); - obj.addProperty("bucketNamespaceName", - tenantState.getBucketNamespaceName()); - obj.addProperty("userRoleName", tenantState.getUserRoleName()); - obj.addProperty("adminRoleName", tenantState.getAdminRoleName()); - obj.addProperty("bucketNamespacePolicyName", + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("tenantId", tenantState.getTenantId()); + obj.put("bucketNamespaceName", tenantState.getBucketNamespaceName()); + obj.put("userRoleName", tenantState.getUserRoleName()); + obj.put("adminRoleName", tenantState.getAdminRoleName()); + obj.put("bucketNamespacePolicyName", tenantState.getBucketNamespacePolicyName()); - obj.addProperty("bucketPolicyName", - tenantState.getBucketPolicyName()); + obj.put("bucketPolicyName", tenantState.getBucketPolicyName()); resArray.add(obj); }); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(resArray)); + // Serialize and print the JSON string with pretty printing + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(resArray); + out().println(jsonString); } - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java index e27a8cecd861..ae56f0ba16f7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java @@ -20,10 +20,9 @@ import java.io.IOException; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; +import org.apache.hadoop.hdds.server.JsonUtils; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.TenantUserList; import org.apache.hadoop.ozone.shell.OzoneAddress; @@ -66,15 +65,15 @@ protected void execute(OzoneClient client, OzoneAddress address) "' with accessId '" + accessIdInfo.getAccessId() + "'"); }); } else { - final JsonArray resArray = new JsonArray(); + ArrayNode resArray = JsonUtils.createArrayNode(); usersInTenant.getUserAccessIds().forEach(accessIdInfo -> { - final JsonObject obj = new JsonObject(); - obj.addProperty("user", accessIdInfo.getUserPrincipal()); - obj.addProperty("accessId", accessIdInfo.getAccessId()); + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("user", accessIdInfo.getUserPrincipal()); + obj.put("accessId", accessIdInfo.getAccessId()); resArray.add(obj); }); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(resArray)); + String prettyJsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(resArray); + out().println(prettyJsonString); } } diff --git 
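Note: every tenant handler hunk above applies the same migration, from Gson's JsonObject/JsonArray to the Jackson-backed JsonUtils helper in hadoop-hdds. The sketch below reproduces that verbose-output pattern with plain Jackson only, so it compiles outside the Ozone tree; it assumes (without verifying) that JsonUtils.createObjectNode, createArrayNode and toJsonStringWithDefaultPrettyPrinter delegate to the equivalent ObjectMapper calls, and all sample values are hypothetical.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class TenantJsonOutputSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    // Stand-in for JsonUtils.createObjectNode(null) / createArrayNode()
    ObjectNode resObj = mapper.createObjectNode();
    resObj.put("user", "hypothetical-user");           // hypothetical sample value

    ArrayNode tenants = mapper.createArrayNode();
    ObjectNode tenant = mapper.createObjectNode();
    tenant.put("accessId", "hypothetical-access-id");  // hypothetical sample value
    tenant.put("tenantId", "hypothetical-tenant");     // hypothetical sample value
    tenant.put("isAdmin", true);
    tenant.put("isDelegatedAdmin", false);
    tenants.add(tenant);
    resObj.set("tenants", tenants);

    // Stand-in for JsonUtils.toJsonStringWithDefaultPrettyPrinter(resObj)
    String pretty = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(resObj);
    System.out.println(pretty);
  }
}

Running it prints the same field names ("user", "accessId", "tenantId", "isAdmin", "isDelegatedAdmin", "tenants") that the handlers emit on their --verbose path.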
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java
index 419628246fe6..671864931a6f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java
@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.ozone.shell.tenant;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonObject;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.apache.hadoop.hdds.server.JsonUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 import picocli.CommandLine;
@@ -48,14 +47,14 @@ protected void execute(OzoneClient client, OzoneAddress address)
     client.getObjectStore().tenantRevokeAdmin(accessId, tenantId);
     if (isVerbose()) {
-      final JsonObject obj = new JsonObject();
-      obj.addProperty("accessId", accessId);
-      obj.addProperty("tenantId", tenantId);
-      obj.addProperty("isAdmin", false);
-      obj.addProperty("isDelegatedAdmin", false);
-      final Gson gson = new GsonBuilder().setPrettyPrinting().create();
-      out().println(gson.toJson(obj));
+      ObjectNode obj = JsonUtils.createObjectNode(null);
+      obj.put("accessId", accessId);
+      obj.put("tenantId", tenantId);
+      obj.put("isAdmin", false);
+      obj.put("isDelegatedAdmin", false);
+
+      String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(obj);
+      out().println(jsonString);
     }
-  }
 }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
index e380e98561b0..8cc80502386f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
@@ -121,6 +121,11 @@ private void deleteVolumeRecursive()
       totalBucketCount++;
     }
     doCleanBuckets();
+    // Reset counters and bucket list
+    numberOfBucketsCleaned.set(0);
+    totalBucketCount = 0;
+    cleanedBucketCounter.set(0);
+    bucketIdList.clear();
   }
   /**
@@ -201,6 +206,7 @@ public void run() {
         if (!cleanOBSBucket(bucket)) {
           throw new RuntimeException("Failed to clean bucket");
         }
+        break;
       default:
         throw new RuntimeException("Invalid bucket layout");
       }
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
index 47f465383d01..87e0eda9b5e7 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
@@ -192,7 +192,7 @@ public void testLoadCommand() {
     Exception e = assertThrows(Exception.class, () -> execute(args1, ""));
     assertInstanceOf(ArrayIndexOutOfBoundsException.class, e.getCause());
-    assertThat(e.getMessage()).contains(": 5");
+    assertThat(e.getMessage()).contains(" 5");
   }
   /**
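Note: the single `+ break;` line in the DeleteVolumeHandler hunk above matters because a Java switch case falls through into the next label without it; in that hunk the next label is `default`, which throws "Invalid bucket layout" even after the OBS bucket was cleaned successfully. A minimal, self-contained illustration of the fall-through behaviour (all names here are hypothetical, not taken from the handler):

public class SwitchFallThroughSketch {
  enum Layout { OBS, OTHER }   // hypothetical stand-in for the bucket layout enum

  static void clean(Layout layout, boolean withBreak) {
    switch (layout) {
    case OBS:
      System.out.println("cleaned OBS bucket");
      if (withBreak) {
        break;               // the fix: stop here instead of falling through
      }
      // without the break, execution continues into the default label below
    default:
      throw new RuntimeException("Invalid bucket layout");
    }
  }

  public static void main(String[] args) {
    clean(Layout.OBS, true);     // prints and returns normally
    try {
      clean(Layout.OBS, false);  // falls through and throws
    } catch (RuntimeException e) {
      System.out.println("fell through: " + e.getMessage());
    }
  }
}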
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java
new file mode 100644
index 000000000000..272bf24c066e
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.repair;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Tests the ozone repair command.
+ */
+public class TestOzoneRepair {
+
+  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
+  private static final PrintStream OLD_OUT = System.out;
+  private static final PrintStream OLD_ERR = System.err;
+  private static final String DEFAULT_ENCODING = UTF_8.name();
+
+  private static final String OZONE_USER = "ozone";
+  private static final String OLD_USER = System.getProperty("user.name");
+
+  @BeforeEach
+  public void setup() throws Exception {
+    System.setOut(new PrintStream(out, false, DEFAULT_ENCODING));
+    System.setErr(new PrintStream(err, false, DEFAULT_ENCODING));
+    System.setProperty("user.name", OZONE_USER);
+  }
+
+  @AfterEach
+  public void reset() {
+    // reset stream after each unit test
+    out.reset();
+    err.reset();
+
+    // restore system streams
+    System.setOut(OLD_OUT);
+    System.setErr(OLD_ERR);
+    System.setProperty("user.name", OLD_USER);
+  }
+
+  @Test
+  void testOzoneRepairWhenUserIsRemindedSystemUserAndDeclinesToProceed() throws Exception {
+    OzoneRepair ozoneRepair = new OzoneRepair();
+    System.setIn(new ByteArrayInputStream("N".getBytes(DEFAULT_ENCODING)));
+
+    int res = ozoneRepair.execute(new String[]{});
+    assertEquals(1, res);
+    assertThat(out.toString(DEFAULT_ENCODING)).contains("Aborting command.");
+    // prompt should contain the current user name as well
+    assertThat(err.toString(DEFAULT_ENCODING)).contains("ATTENTION: Running as user " + OZONE_USER);
+  }
+
+  @Test
+  void testOzoneRepairWhenUserIsRemindedSystemUserAndAgreesToProceed() throws Exception {
+    OzoneRepair ozoneRepair = new OzoneRepair();
+    System.setIn(new ByteArrayInputStream("y".getBytes(DEFAULT_ENCODING)));
+
+    ozoneRepair.execute(new String[]{});
+    assertThat(out.toString(DEFAULT_ENCODING)).contains("Run as user: " + OZONE_USER);
+    // prompt should contain the current user name as well
+    assertThat(err.toString(DEFAULT_ENCODING)).contains("ATTENTION: Running as user " + OZONE_USER);
+  }
+
+}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java
b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java index 2457a00fe52c..620142c244b5 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java @@ -22,10 +22,12 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.MutableConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.hdds.conf.InMemoryConfiguration; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -68,6 +70,34 @@ public void implicitHaMultipleServiceId() "service1,service2"))); } + @Test + public void implicitHaMultipleServiceIdWithDefaultServiceId() + throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("/vol1/bucket1/key1"); + InMemoryConfiguration conf = new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, + "service1,service2"); + conf.set(OZONE_OM_INTERNAL_SERVICE_ID, "service2"); + + address.createClient(conf); + assertFalse(address.simpleCreation); + assertEquals("service2", address.serviceId); + } + + @Test + public void implicitHaMultipleServiceIdWithDefaultServiceIdForS3() + throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("/vol1/bucket1/key1"); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(OZONE_OM_SERVICE_IDS_KEY, "service1,service2"); + conf.set(OZONE_OM_INTERNAL_SERVICE_ID, "service2"); + + address.createClientForS3Commands(conf, null); + assertFalse(address.simpleCreation); + assertEquals("service2", address.serviceId); + } + @Test public void explicitHaMultipleServiceId() throws OzoneClientException, IOException { diff --git a/pom.xml b/pom.xml index 37dfb139e2cc..20b2d19ee72b 100644 --- a/pom.xml +++ b/pom.xml @@ -94,7 +94,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs UTF-8 UTF-8 - 1.5 + 3.2.2 bash @@ -103,30 +103,27 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs false false true - 9.4.53.v20231009 + 9.4.54.v20240208 5.2.0 4.2.0 - _ - _ 4 1.9.4 1.6.0 - 1.15 + 1.17.0 3.2.2 - 1.25.0 - 2.8.0 - 1.5.2-5 - 1.0.13 - 2.11.0 + 1.26.0 + 2.10.1 + 1.5.6-3 + 1.3.4 + 2.16.1 3.14.0 1.2 1.1 3.6.1 3.10.0 - 2.6.0 1.11.0 1.6 1.5 @@ -139,19 +136,17 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.3.2 3.12.2 - 5.0.4 0.8.0.RELEASE - 1.77 - 3.3.0 + 1.78.1 10.14.2.0 3.0.2 - 3.2.4 - 0.8.5 - 3.21.0-GA + 3.2.6 + 0.8.12 + 3.30.2-GA 1.2.2 2.3.3 2.3.9 - 0.1.54 + 0.1.55 2.0 3.1.0 3.1 @@ -160,12 +155,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.19.4 - 2.41 + 2.42 1.9.13 1.9.13 - 2.13.4.20221013 + 2.16.2 5.4.0 @@ -175,20 +170,21 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.6.1 - 4.5.13 + 4.5.14 4.4.16 - 2.0.10 + 2.0.13 2.17.1 - 3.4.2 + 3.4.4 1.2.25 - 1.9.22 - 1.8 + 1.0.1 + 1.9.24 + 1.11 4.7.5 0.16.0 - 0.9.11 + 0.10.2 1.7 @@ -199,38 +195,35 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs 3.7.1 1.1.1 - 3.0.0 - 3.1.12 - 2.1.7 + 3.1.12.2 + 2.1.9 4.12.0 4.2.2 2.6.1 2.1.1 - 2.12.5 + 2.12.7 0.19 2.2.0 - 32.0.0-jre - 5.1.0 - 2.9.0 + 32.1.3-jre + 6.0.0 + 2.10.1 - 1.0 2.7.5 3.6.0 4.11.0 2.2 - 1.24 - 5.10.1 - 3.7.2 + 5.10.2 + 3.8.4 - 0.5.1 + 0.6.1 3.19.6 1.7.1 - 4.1.100.Final + 4.1.109.Final 1.58.0 7.7.3 @@ -251,31 +244,30 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError - flaky | slow | unhealthy - 3.0.0-M5 + flaky | native | slow | unhealthy + 3.0.0-M4 ${maven-surefire-plugin.version} - ${maven-surefire-plugin.version} 3.3.2 3.9.0 3.1.1 3.1.0 - 3.5.1 - 3.3.0 + 3.5.2 + 3.4.1 3.4.0 - 3.3.0 + 3.3.1 1.6.1 - 1.5 + 1.7.0 3.4.1 3.0.1 - 3.6.0 + 3.7.1 0.16.1 - 2.8.1 - 1.9 + 3.1.2 + 3.5.0 3.6.1 4.2.2 - 0.29.0 - 1.3.1 + 0.44.0 + 3.1.1 2.3.0 1.0-beta-1 1.0-alpha-11 @@ -284,7 +276,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.1.0 9.3 1200 - 1.12.632 + 1.12.661 1.15.0 @@ -296,23 +288,22 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${basedir}/target/classes 1.9.7 - 1.14.0 - 2.4.0 - 1.0.8 - 1.2.13 - 1.9.3 - 1.1.8 - 1.4.9 - 1.0.1 - - 5.3.27 + 1.15.0 + 2.5.0 + 1.4.0 + + 5.3.34 3.11.10 5.1.0 + 1.2.1 + 3.9.6 + 1.1.10.5 + 1.2.0 + 9.37.2 - @@ -345,44 +336,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-annotations ${hadoop.version} - - org.apache.hadoop - hadoop-client-modules - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-api - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-check-invariants - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-check-test-invariants - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-integration-tests - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-runtime - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-minicluster - ${hadoop.version} - org.apache.hadoop hadoop-common @@ -405,11 +358,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - - org.apache.hadoop - hadoop-nfs - ${hadoop.version} - org.apache.hadoop hadoop-hdfs @@ -420,237 +368,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-hdfs-client ${hadoop.version} - - org.apache.hadoop - hadoop-hdfs-rbf - ${hadoop.version} - - - org.apache.hadoop - hadoop-mapreduce-client-app - ${hadoop.version} - - - org.apache.hadoop - hadoop-mapreduce-client-app - ${hadoop.version} - test-jar - - - org.apache.hadoop - hadoop-mapreduce-client-common - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-api - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-client - ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-client-core - ${hadoop.version} - - org.apache.hadoop hadoop-mapreduce-client-jobclient ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-client-shuffle - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn - ${hadoop.version} - pom - - - - org.apache.hadoop - hadoop-yarn-server - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-web-proxy - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-common - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-common - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-server-tests - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-common - 
${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-common - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-registry - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-nodemanager - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-resourcemanager - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-resourcemanager - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-server-applicationhistoryservice - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice-hbase-client - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice-hbase-common - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-applications-distributedshell - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timeline-pluginstorage - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timeline-pluginstorage - test-jar - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-router - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-services-core - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-services-core - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-mapreduce-client-jobclient - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-mapreduce-client-hs - ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-examples - ${hadoop.version} - - - org.apache.hadoop - hadoop-gridmix - ${hadoop.version} - - - - org.apache.hadoop - hadoop-streaming - ${hadoop.version} - - - org.apache.hadoop - hadoop-archives - ${hadoop.version} - - - org.apache.hadoop - hadoop-archive-logs - ${hadoop.version} + test org.apache.hadoop @@ -663,70 +385,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - - org.apache.hadoop - hadoop-datajoin - ${hadoop.version} - - - org.apache.hadoop - hadoop-rumen - ${hadoop.version} - - - org.apache.hadoop - hadoop-extras - ${hadoop.version} - - org.apache.hadoop hadoop-client ${hadoop.version} - - - org.apache.hadoop - hadoop-minicluster - ${hadoop.version} - - org.apache.hadoop hadoop-minikdc ${hadoop.version} - - - org.apache.hadoop - hadoop-openstack - ${hadoop.version} - - - - org.apache.hadoop - hadoop-azure - ${hadoop.version} - - - - org.apache.hadoop - hadoop-azure-datalake - ${hadoop.version} - - - - org.apache.hadoop - hadoop-aws - ${hadoop.version} - - - - org.apache.hadoop - hadoop-aliyun - ${hadoop.version} - - org.apache.hadoop hadoop-kms @@ -738,11 +406,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - com.google.guava guava ${guava.version} + + + com.google.code.findbugs + jsr305 + + com.google.code.gson @@ -779,6 +452,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs httpcore-nio ${httpcore.version} + + org.apache.kerby + kerb-core + ${kerby.version} + + + org.apache.kerby + kerb-util + ${kerby.version} + commons-codec commons-codec @@ -789,11 +472,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs commons-net ${commons-net.version} - - org.apache.commons - commons-pool2 - ${commons-pool2.version} - commons-validator commons-validator @@ -946,12 +624,6 @@ 
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs jersey-media-jaxb ${jersey2.version} - - - org.ow2.asm - asm - ${asm.version} - com.sun.jersey jersey-core @@ -1006,13 +678,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs bonecp ${bonecp.version} - - - cglib - cglib - ${cglib.version} - - com.sun.jersey.contribs jersey-guice @@ -1234,6 +899,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.zookeeper zookeeper ${zookeeper.version} + + + ch.qos.logback + logback-core + + + ch.qos.logback + logback-classic + + org.slf4j @@ -1273,11 +948,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs pom import - - org.jmockit - jmockit - ${jmockit.version} - org.mockito mockito-core @@ -1293,11 +963,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs compile-testing ${compile-testing.version} - - org.objenesis - objenesis - ${objenesis.version} - com.google.re2j re2j @@ -1403,11 +1068,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-cloud-storage ${hadoop.version} - - com.google.code.findbugs - jsr305 - ${findbugs.version} - jakarta.xml.bind jakarta.xml.bind-api @@ -1471,6 +1131,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.reflections reflections ${reflections.version} + + + com.google.code.findbugs + jsr305 + + org.rocksdb @@ -1552,6 +1218,21 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs mockito-inline ${mockito.version} + + org.xerial.snappy + snappy-java + ${snappy-java.version} + + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + ${hadoop-shaded-guava.version} + + + com.nimbusds + nimbus-jose-jwt + ${com.nimbusds.nimbus-jose-jwt.version} + @@ -1643,11 +1324,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin ${maven-surefire-plugin.version} - - org.apache.maven.plugins - maven-failsafe-plugin - ${maven-failsafe-plugin.version} - org.apache.maven.plugins maven-install-plugin @@ -1775,6 +1451,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.rocksdb.OptionsUtil org.rocksdb.RocksDBException org.rocksdb.StatsLevel + org.rocksdb.TableProperties org.rocksdb.TransactionLogIterator.BatchResult org.rocksdb.TickerType org.rocksdb.LiveFileMetaData @@ -1787,7 +1464,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.rocksdb.RocksDB.* - org.apache.hadoop.hdds.utils.db.managed.* + + org.apache.hadoop.hdds.utils.db.managed.* + org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer + @@ -2026,11 +1706,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs **/Test*.java - - **/${test.exclude}.java - ${test.exclude.pattern} - **/Test*$*.java - @@ -2091,6 +1766,25 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + org.codehaus.mojo + properties-maven-plugin + ${properties.maven.plugin.version} + + + org.apache.maven + maven-core + ${maven.core.version} + + + + + org.apache.rat + apache-rat-plugin + + dev-support/rat/rat-exclusions.txt + + @@ -2243,6 +1937,17 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + go-offline + + void + true + true + true + true + true + + client @@ -2260,6 +1965,24 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 
+ + container + + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.apache.hadoop.hdds.scm.container.** + org.apache.hadoop.ozone.container.** + + ${unstable-test-groups} + + + + + om @@ -2271,6 +1994,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.ozone.om.** + + org.apache.hadoop.ozone.om.snapshot.** + ${unstable-test-groups} @@ -2278,7 +2004,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - contract + snapshot @@ -2286,7 +2012,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin - org.apache.hadoop.fs.ozone.contract.** + org.apache.hadoop.ozone.om.snapshot.** ${unstable-test-groups} @@ -2303,11 +2029,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin - org.apache.hadoop.fs.ozone.** + org.apache.hadoop.fs.** - - org.apache.hadoop.fs.ozone.contract.** - ${unstable-test-groups} @@ -2325,6 +2048,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.hdds.** + + org.apache.hadoop.hdds.scm.container.** + ${unstable-test-groups} @@ -2340,13 +2066,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin + org.apache.ozone.** org.apache.hadoop.ozone.** org.apache.hadoop.ozone.client.** + org.apache.hadoop.ozone.container.** org.apache.hadoop.ozone.debug.** org.apache.hadoop.ozone.freon.** org.apache.hadoop.ozone.om.** + org.apache.hadoop.ozone.recon.** org.apache.hadoop.ozone.shell.** ${unstable-test-groups} @@ -2355,6 +2084,23 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + recon + + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.apache.hadoop.ozone.recon.** + + ${unstable-test-groups} + + + + + shell @@ -2397,7 +2143,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin flaky - slow | unhealthy + native | slow | unhealthy @@ -2412,6 +2158,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin native + slow | unhealthy
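Note: in the surefire-related hunks above, the excluded test group expression grows from `flaky | slow | unhealthy` to `flaky | native | slow | unhealthy`, and per-area profiles (container, snapshot, recon, and others) narrow the included packages. The XML element names around these values were stripped from this copy of the patch, so the exact pom.xml markup cannot be reconstructed here; assuming the group names are JUnit 5 tags filtered by Maven Surefire, a test would opt into one of those groups roughly like this (hypothetical test class):

import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

// Hypothetical test class; "native" is assumed to be one of the JUnit 5 tags
// matched by the surefire excluded-group expression
// "flaky | native | slow | unhealthy" seen in the diff above.
class NativeCodePathSketchTest {

  @Test
  @Tag("native")
  void runsOnlyWhenNativeGroupIsNotExcluded() {
    // exercised only by a profile that does not exclude the "native" group
  }
}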