diff --git a/.github/close-pending.sh b/.github/close-pending.sh new file mode 100755 index 000000000000..ae05001d479e --- /dev/null +++ b/.github/close-pending.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +MESSAGE=$(cat $SCRIPT_DIR/closing-message.txt) + +while IFS= read -r number && + IFS= read -r title; do + echo "Closing PR ($number): $title" + curl -s -o /dev/null \ + -X POST \ + --data "$(jq --arg body "$MESSAGE" -n '{body: $body}')" \ + --header "authorization: Bearer $GITHUB_TOKEN" \ + --header 'content-type: application/json' \ + "https://api.github.com/repos/apache/hadoop-ozone/issues/$number/comments" + + curl -s -o /dev/null \ + -X PATCH \ + --data '{"state": "close"}' \ + --header "authorization: Bearer $GITHUB_TOKEN" \ + --header 'content-type: application/json' \ + "https://api.github.com/repos/apache/hadoop-ozone/pulls/$number" +done < <(curl -H "Content-Type: application/json" \ + --header "authorization: Bearer $GITHUB_TOKEN" \ + "https://api.github.com/search/issues?q=repo:apache/hadoop-ozone+type:pr+updated:<$(date -d "-21 days" +%Y-%m-%d)+label:pending+is:open" \ + | jq -r '.items[] | (.number,.title)') diff --git a/.github/closing-message.txt b/.github/closing-message.txt new file mode 100644 index 000000000000..261eac275e0e --- /dev/null +++ b/.github/closing-message.txt @@ -0,0 +1,7 @@ +Thank you very much for the patch. I am closing this PR __temporarily__ as there was no activity recently and it is waiting for response from its author. + +It doesn't mean that this PR is not important or ignored: feel free to reopen the PR at any time. + +It only means that attention of committers is not required. We prefer to keep the review queue clean. This ensures PRs in need of review are more visible, which results in faster feedback for all PRs. + +If you need ANY help to finish this PR, please [contact the community](https://github.com/apache/hadoop-ozone#contact) on the mailing list or the slack channel." diff --git a/.github/comment-commands/close.sh b/.github/comment-commands/close.sh index 4624bd869c3f..cb57b7192138 100755 --- a/.github/comment-commands/close.sh +++ b/.github/comment-commands/close.sh @@ -16,14 +16,8 @@ #doc: Close pending pull request temporary # shellcheck disable=SC2124 -MESSAGE="Thank you very much for the patch. I am closing this PR __temporarily__ as there was no -activity recently and it is waiting for response from its author. - -It doesn't mean that this PR is not important or ignored: feel free to reopen the PR at any time. - -It only means that attention of committers is not required. We prefer to keep the review queue clean. 
This ensures PRs in need of review are more visible, which results in faster feedback for all PRs. - -If you need ANY help to finish this PR, please [contact the community](https://github.com/apache/hadoop-ozone#contact) on the mailing list or the slack channel." +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +MESSAGE=$(cat $SCRIPT_DIR/../closing-message.txt) set +x #GITHUB_TOKEN curl -s -o /dev/null \ diff --git a/.github/comment-commands/pending.sh b/.github/comment-commands/pending.sh index 08947f636b8b..840ed82889d2 100755 --- a/.github/comment-commands/pending.sh +++ b/.github/comment-commands/pending.sh @@ -20,6 +20,7 @@ MESSAGE="Marking this issue as un-mergeable as requested. Please use \`/ready\` comment when it's resolved. +Please note that the PR will be closed after 21 days of inactivity from now. (But can be re-opened anytime later...) > $@" URL="$(jq -r '.issue.pull_request.url' "$GITHUB_EVENT_PATH")/reviews" diff --git a/.github/comment-commands/retest.sh b/.github/comment-commands/retest.sh index b9e063260cac..2214264e9b28 100755 --- a/.github/comment-commands/retest.sh +++ b/.github/comment-commands/retest.sh @@ -14,20 +14,62 @@ # See the License for the specific language governing permissions and # limitations under the License. -#doc: add new empty commit to trigger new CI build -set +x #GITHUB_TOKEN +#doc: provide help on how to trigger new CI build -PR_URL=$(jq -r '.issue.pull_request.url' "$GITHUB_EVENT_PATH") -read -r REPO_URL BRANCH <<<"$(curl "$PR_URL" | jq -r '.head.repo.clone_url + " " + .head.ref' | sed "s/github.com/$GITHUB_ACTOR:$GITHUB_TOKEN@github.com/g")" +# posting a new commit from this script does not trigger CI checks +# https://help.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token -git fetch "$REPO_URL" "$BRANCH" -git checkout FETCH_HEAD +set -eu + +code='```' + +pr_url="$(jq -r '.issue.pull_request.url' "${GITHUB_EVENT_PATH}")" +commenter="$(jq -r '.comment.user.login' "${GITHUB_EVENT_PATH}")" +assoc="$(jq -r '.comment.author_association' "${GITHUB_EVENT_PATH}")" -export GIT_COMMITTER_EMAIL="noreply@github.com" -export GIT_COMMITTER_NAME="GitHub actions" +curl -LSs "${pr_url}" -o pull.tmp +source_repo="$(jq -r '.head.repo.ssh_url' pull.tmp)" +branch="$(jq -r '.head.ref' pull.tmp)" +pr_owner="$(jq -r '.head.user.login' pull.tmp)" +maintainer_can_modify="$(jq -r '.maintainer_can_modify' pull.tmp)" -export GIT_AUTHOR_EMAIL="noreply@github.com" -export GIT_AUTHOR_NAME="GitHub actions" +# PR owner +# => +# has local branch, can simply push +if [[ "${commenter}" == "${pr_owner}" ]]; then + cat <<-EOF +To re-run CI checks, please follow these steps with the source branch checked out: +${code} +git commit --allow-empty -m 'trigger new CI check' +git push +${code} +EOF + +# member AND modification allowed by PR author +# OR +# repo owner +# => +# include steps to fetch branch +elif [[ "${maintainer_can_modify}" == "true" ]] && [[ "${assoc}" == "MEMBER" ]] || [[ "${assoc}" == "OWNER" ]]; then + cat <<-EOF +To re-run CI checks, please follow these steps: +${code} +git fetch "${source_repo}" "${branch}" +git checkout FETCH_HEAD +git commit --allow-empty -m 'trigger new CI check' +git push "${source_repo}" HEAD:"${branch}" +${code} +EOF -git commit --allow-empty -m "empty commit to retest build" > /dev/null -git push $REPO_URL HEAD:$BRANCH +# other folks +# => +# ping author +else + cat <<-EOF +@${pr_owner} please trigger new CI check by following these steps: +${code} 
+git commit --allow-empty -m 'trigger new CI check' +git push +${code} +EOF +fi diff --git a/.github/workflows/close-pending.yaml b/.github/workflows/close-pending.yaml new file mode 100644 index 000000000000..e335701e4cf2 --- /dev/null +++ b/.github/workflows/close-pending.yaml @@ -0,0 +1,32 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name: close-prs + +on: + schedule: + - cron: '0 0 * * *' + +jobs: + close-pending: + name: close-pending + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@master + - name: Execute close-pending script + if: github.repository == 'apache/hadoop-ozone' + run: ./.github/close-pending.sh + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/comments.yaml b/.github/workflows/comments.yaml index bfab244e88ac..2341662ca63a 100644 --- a/.github/workflows/comments.yaml +++ b/.github/workflows/comments.yaml @@ -25,7 +25,7 @@ jobs: name: check-comment runs-on: ubuntu-latest steps: - - uses: actions/checkout@master + - uses: actions/checkout@v2 - run: ./.github/process-comment.sh env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index e00018ac15f1..20d2a2a5d1f5 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -23,8 +23,10 @@ jobs: name: compile runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master - - uses: actions/cache@v2 + - name: Checkout project + uses: actions/checkout@v2 + - name: Cache for npm dependencies + uses: actions/cache@v2 with: path: | ~/.pnpm-store @@ -32,98 +34,145 @@ jobs: key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }} restore-keys: | ${{ runner.os }}-pnpm- - - uses: ./.github/buildenv + - name: Execute tests + uses: ./.github/buildenv with: args: ./hadoop-ozone/dev-support/checks/build.sh + bats: + runs-on: ubuntu-18.04 + steps: + - name: Checkout project + uses: actions/checkout@v2 + - name: Install bats + run: | + cd /tmp + curl -LSs https://github.com/bats-core/bats-core/archive/v1.2.1.tar.gz | tar xzf - + cd bats-core-1.2.1 + sudo ./install.sh /usr/local + - name: Execute tests + run: ./hadoop-ozone/dev-support/checks/${{ github.job }}.sh + - name: Summary of failures + run: cat target/${{ github.job }}/summary.txt + if: always() + - name: Archive build results + uses: actions/upload-artifact@v2 + if: always() + with: + name: ${{ github.job }} + path: target/${{ github.job }} + continue-on-error: true rat: - name: rat runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master - - uses: ./.github/buildenv - with: - args: ./hadoop-ozone/dev-support/checks/rat.sh - - name: Summary of failures - run: cat target/${{ github.job }}/summary.txt - if: always() - - uses: actions/upload-artifact@master - if: 
always() - with: - name: rat - path: target/rat + - name: Checkout project + uses: actions/checkout@v2 + - name: Execute tests + uses: ./.github/buildenv + with: + args: ./hadoop-ozone/dev-support/checks/rat.sh + - name: Summary of failures + run: cat target/${{ github.job }}/summary.txt + if: always() + - name: Archive build results + uses: actions/upload-artifact@v2 + if: always() + with: + name: rat + path: target/rat + continue-on-error: true author: - name: author runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master - - uses: ./.github/buildenv - with: - args: ./hadoop-ozone/dev-support/checks/author.sh - - name: Summary of failures - run: cat target/${{ github.job }}/summary.txt - if: always() - - uses: actions/upload-artifact@master - if: always() - with: - name: author - path: target/author + - name: Checkout project + uses: actions/checkout@v2 + - name: Execute tests + run: hadoop-ozone/dev-support/checks/author.sh + - name: Summary of failures + run: cat target/${{ github.job }}/summary.txt + if: always() + - name: Archive build results + uses: actions/upload-artifact@v2 + if: always() + with: + name: author + path: target/author + continue-on-error: true unit: - name: unit runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master - - uses: ./.github/buildenv - with: - args: ./hadoop-ozone/dev-support/checks/unit.sh - - name: Summary of failures - run: cat target/${{ github.job }}/summary.txt - if: always() - - uses: actions/upload-artifact@master - if: always() - with: - name: unit - path: target/unit + - name: Checkout project + uses: actions/checkout@v2 + - name: Execute tests + uses: ./.github/buildenv + with: + args: ./hadoop-ozone/dev-support/checks/unit.sh + - name: Summary of failures + run: cat target/${{ github.job }}/summary.txt + if: always() + - name: Archive build results + uses: actions/upload-artifact@v2 + if: always() + with: + name: unit + path: target/unit + continue-on-error: true checkstyle: - name: checkstyle runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master - - uses: ./.github/buildenv - with: - args: ./hadoop-ozone/dev-support/checks/checkstyle.sh - - name: Summary of failures - run: cat target/${{ github.job }}/summary.txt - if: always() - - uses: actions/upload-artifact@master - if: always() - with: - name: checkstyle - path: target/checkstyle + - name: Checkout project + uses: actions/checkout@v2 + - name: Execute tests + uses: ./.github/buildenv + with: + args: ./hadoop-ozone/dev-support/checks/checkstyle.sh + - name: Summary of failures + run: cat target/${{ github.job }}/summary.txt + if: always() + - name: Archive build results + uses: actions/upload-artifact@v2 + if: always() + with: + name: checkstyle + path: target/checkstyle + continue-on-error: true findbugs: - name: findbugs runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@master - - uses: ./.github/buildenv - with: - args: ./hadoop-ozone/dev-support/checks/findbugs.sh - - name: Summary of failures - run: cat target/${{ github.job }}/summary.txt - if: always() - - uses: actions/upload-artifact@master - if: always() - with: - name: findbugs - path: target/findbugs + - name: Checkout project + uses: actions/checkout@v2 + - name: Execute tests + uses: ./.github/buildenv + with: + args: ./hadoop-ozone/dev-support/checks/findbugs.sh + - name: Summary of failures + run: cat target/${{ github.job }}/summary.txt + if: always() + - name: Archive build results + uses: actions/upload-artifact@v2 + if: always() + with: + name: findbugs + path: target/findbugs + 
continue-on-error: true acceptance: - name: acceptance runs-on: ubuntu-18.04 + strategy: + matrix: + suite: + - secure + - unsecure + - misc + fail-fast: false steps: - - uses: actions/cache@v2 + - name: Checkout project + uses: actions/checkout@v2 + - name: Cache for maven dependencies + uses: actions/cache@v2 with: path: ~/.m2/repository key: maven-repo-${{ hashFiles('**/pom.xml') }} - - uses: actions/cache@v2 + - name: Cache for npm dependencies + uses: actions/cache@v2 with: path: | ~/.pnpm-store @@ -131,96 +180,179 @@ jobs: key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }} restore-keys: | ${{ runner.os }}-pnpm- - - name: checkout to /mnt/ozone + - name: Checkout to /mnt/ozone run: | sudo chmod 777 /mnt - git clone https://github.com/${GITHUB_REPOSITORY}.git /mnt/ozone + git clone 'https://github.com/${{ github.repository }}.git' /mnt/ozone cd /mnt/ozone - git fetch origin "${GITHUB_REF}" + if [[ '${{ github.event_name }}' == 'pull_request' ]]; then + git fetch --verbose origin '${{ github.ref }}' + else + git fetch --verbose origin '${{ github.sha }}' + fi git checkout FETCH_HEAD git reset --hard - - name: run a full build + - name: Run a full build run: | cd /mnt/ozone hadoop-ozone/dev-support/checks/build.sh -Pcoverage - - run: sudo pip install robotframework - - run: | + - name: Install robotframework + run: sudo pip install robotframework + - name: Execute tests + run: | cd /mnt/ozone/hadoop-ozone/dist/target/ozone-* && sudo mkdir .aws && sudo chmod 777 .aws && sudo chown 1000 .aws cd /mnt/ozone && hadoop-ozone/dev-support/checks/acceptance.sh env: KEEP_IMAGE: false + OZONE_ACCEPTANCE_SUITE: ${{ matrix.suite }} OZONE_WITH_COVERAGE: true - - uses: actions/upload-artifact@master + OZONE_VOLUME_OWNER: 1000 + - name: Archive build results + uses: actions/upload-artifact@v2 if: always() with: - name: acceptance + name: acceptance-${{ matrix.suite }} path: /mnt/ozone/target/acceptance - - run: | + continue-on-error: true + - name: Delete temporary build artifacts before caching + run: | #Never cache local artifacts rm -rf ~/.m2/repository/org/apache/hadoop/hdds rm -rf ~/.m2/repository/org/apache/hadoop/ozone if: always() integration: - name: integration runs-on: ubuntu-18.04 - needs: - - build strategy: matrix: profile: - client - - filesystem - - filesystem-contract - - freon - - hdds-om + - filesystem-hdds - ozone fail-fast: false steps: - - run: sudo mkdir mnt && sudo mount --bind /mnt `pwd`/mnt && sudo chmod 777 mnt - - uses: actions/checkout@master - with: - path: mnt/ozone - - uses: ./mnt/ozone/.github/buildenv - with: - args: ./mnt/ozone/hadoop-ozone/dev-support/checks/integration.sh -P${{ matrix.profile }} - - name: Summary of failures - run: cat mnt/ozone/target/${{ github.job }}/summary.txt - if: always() - - uses: actions/upload-artifact@master - if: always() - with: - name: it-${{ matrix.profile }} - path: mnt/ozone/target/integration + - name: Setup link to SSD + run: sudo mkdir mnt && sudo mount --bind /mnt `pwd`/mnt && sudo chmod 777 mnt + - name: Checkout project + uses: actions/checkout@v2 + with: + path: mnt/ozone + - name: Execute tests + uses: ./mnt/ozone/.github/buildenv + with: + args: ./mnt/ozone/hadoop-ozone/dev-support/checks/integration.sh -P${{ matrix.profile }} + - name: Summary of failures + run: cat mnt/ozone/target/${{ github.job }}/summary.txt + if: always() + - name: Archive build results + uses: actions/upload-artifact@v2 + if: always() + with: + name: it-${{ matrix.profile }} + path: mnt/ozone/target/integration + continue-on-error: true 
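  # Each matrix profile above maps to one invocation of the shared check script;
  # the same selection can be reproduced locally (illustrative, from the source root):
  #   hadoop-ozone/dev-support/checks/integration.sh -Pclient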
coverage: - name: coverage runs-on: ubuntu-18.04 needs: - - acceptance - - integration - - unit + - acceptance + - integration + - unit + steps: + - name: Checkout project + uses: actions/checkout@v2 + - name: Run a full build + uses: ./.github/buildenv + with: + args: ./hadoop-ozone/dev-support/checks/build.sh + - name: Download artifacts + uses: actions/download-artifact@v2 + with: + path: target/artifacts + - name: Calculate combined coverage + run: ./hadoop-ozone/dev-support/checks/coverage.sh + - name: Upload coverage to Sonar + uses: ./.github/buildenv + if: github.repository == 'apache/hadoop-ozone' && github.event_name != 'pull_request' + with: + args: ./hadoop-ozone/dev-support/checks/sonar.sh + env: + SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./target/coverage/all.xml + name: codecov-umbrella + fail_ci_if_error: true + - name: Archive build results + uses: actions/upload-artifact@v2 + with: + name: coverage + path: target/coverage + continue-on-error: true + kubernetes: + runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - uses: ./.github/buildenv - with: - args: ./hadoop-ozone/dev-support/checks/build.sh - - uses: actions/download-artifact@v2 - with: - path: target/artifacts - - run: ./hadoop-ozone/dev-support/checks/coverage.sh - - uses: ./.github/buildenv - if: github.repository == 'apache/hadoop-ozone' && github.event_name != 'pull_request' - with: - args: ./hadoop-ozone/dev-support/checks/sonar.sh - env: - SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 - with: - file: ./target/coverage/all.xml - name: codecov-umbrella - fail_ci_if_error: true - - uses: actions/upload-artifact@master - with: - name: coverage - path: target/coverage + - name: Checkout project + uses: actions/checkout@v2 + - name: Cache for maven dependencies + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: maven-repo-${{ hashFiles('**/pom.xml') }} + - name: Cache for npm dependencies + uses: actions/cache@v2 + with: + path: | + ~/.pnpm-store + **/node_modules + key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm- + - name: Checkout to /mnt/ozone + run: | + sudo chmod 777 /mnt + git clone 'https://github.com/${{ github.repository }}.git' /mnt/ozone + cd /mnt/ozone + if [[ '${{ github.event_name }}' == 'pull_request' ]]; then + git fetch --verbose origin '${{ github.ref }}' + else + git fetch --verbose origin '${{ github.sha }}' + fi + git checkout FETCH_HEAD + git reset --hard + - name: Install robotframework + run: sudo pip install robotframework + - name: Install k3s + run: curl -sfL https://get.k3s.io | sh - + - name: Copy Kubernetes config file + run: | + sudo mkdir ~/.kube + sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config + sudo chown $(id -u) ~/.kube/config + - name: Install flekszible + run: | + cd /tmp + wget https://github.com/elek/flekszible/releases/download/v1.8.1/flekszible_1.8.1_Linux_x86_64.tar.gz -O - | tar -zx + chmod +x flekszible + sudo mv flekszible /usr/bin/flekszible + - name: Run a full build + run: | + cd /mnt/ozone + hadoop-ozone/dev-support/checks/build.sh -Pcoverage + - name: Execute tests + run: | + cd /mnt/ozone/hadoop-ozone/dist/target/ozone-* && sudo mkdir .aws && sudo chmod 777 .aws && sudo chown 1000 .aws + cd /mnt/ozone && 
hadoop-ozone/dev-support/checks/kubernetes.sh + - name: Archive build results + uses: actions/upload-artifact@v2 + if: always() + with: + name: kubernetes + path: /mnt/ozone/target/kubernetes + continue-on-error: true + - name: Delete temporary build artifacts before caching + run: | + #Never cache local artifacts + rm -rf ~/.m2/repository/org/apache/hadoop/hdds + rm -rf ~/.m2/repository/org/apache/hadoop/ozone + if: always() diff --git a/.gitignore b/.gitignore index 551b1b5361ce..e09c2eb819c0 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,7 @@ .classpath .project .settings +*.factorypath target build dependency-reduced-pom.xml @@ -61,5 +62,6 @@ output.xml report.html hadoop-hdds/docs/public +hadoop-ozone/recon/node_modules .mvn diff --git a/LICENSE.txt b/LICENSE.txt index 98fd68deb1cc..e7a35220a330 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -235,5 +235,5 @@ hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.7.9.min. hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.7.9.min.js -hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js -hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js +hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.5.1.min.js +hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.5.1.min.js diff --git a/dev-support/byteman/hcfs-write.btm b/dev-support/byteman/hcfs-write.btm new file mode 100644 index 000000000000..8e8373d532ce --- /dev/null +++ b/dev-support/byteman/hcfs-write.btm @@ -0,0 +1,111 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
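#
# The rules below rely on Byteman's built-in helper counters and timers
# (incrementCounter/readCounter, resetTimer/getElapsedTimeFromTimer) to measure
# HCFS write/hsync/hflush/close calls. The script is attached through the
# Byteman agent; the classpath and main class below are illustrative:
#
#   java -javaagent:${BYTEMAN_HOME}/lib/byteman.jar=script:dev-support/byteman/hcfs-write.btm \
#        -cp <client-classpath> <hcfs-client-main-class>
#
# Accumulated totals are printed whenever a FileSystem instance is closed.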
+ +RULE FileSystem.close +CLASS org.apache.hadoop.fs.FileSystem +METHOD close +IF TRUE +DO + System.out.println("Closing file system instance: " + System.identityHashCode($0)); + System.out.println(" write.call: " + readCounter("write.call")); + System.out.println(" write.allTime: " + readCounter("write.allTime")); + System.out.println(" hsync.call: " + readCounter("hsync.call")); + System.out.println(" hsync.allTime: " + readCounter("hsync.allTime")); + System.out.println(" hflush.call: " + readCounter("hflush.call")); + System.out.println(" hflush.allTime: " + readCounter("hflush.allTime")); + System.out.println(" close.call: " + readCounter("close.call")); + System.out.println(" close.allTime: " + readCounter("close.allTime")) + + +ENDRULE + +RULE DataOutputStream.Write.Entry +CLASS ^FSDataOutputStream$PositionCache +METHOD write +AT ENTRY +IF TRUE +DO resetTimer(Thread.currentThread()); + incrementCounter("write.call") +ENDRULE + +RULE DataOutputStream.Write.Exit +CLASS ^FSDataOutputStream$PositionCache +METHOD write +AT EXIT +BIND elapsedTime = java.lang.Math.toIntExact(getElapsedTimeFromTimer(Thread.currentThread())) +IF TRUE +DO + incrementCounter("write.allTime", elapsedTime) +ENDRULE + + +RULE FSDataOutputStream.Hsync.Entry +CLASS FSDataOutputStream +METHOD hsync +AT ENTRY +IF TRUE +DO resetTimer(Thread.currentThread()); + incrementCounter("hsync.call") +ENDRULE + +RULE FSDataOutputStream.Hsync.Exit +CLASS FSDataOutputStream +METHOD hsync +AT EXIT +BIND elapsedTime = java.lang.Math.toIntExact(getElapsedTimeFromTimer(Thread.currentThread())) +IF TRUE +DO + incrementCounter("hsync.allTime", elapsedTime) +ENDRULE + + +RULE FSDataOutputStream.Hflush.Entry +CLASS ^FSDataOutputStream +METHOD hflush +AT ENTRY +IF TRUE +DO resetTimer(Thread.currentThread()); + incrementCounter("hflush.call") +ENDRULE + +RULE FSDataOutputStream.Hflush.Exit +CLASS ^FSDataOutputStream +METHOD hflush +AT EXIT +BIND elapsedTime = java.lang.Math.toIntExact(getElapsedTimeFromTimer(Thread.currentThread())) +IF TRUE +DO + incrementCounter("hflush.allTime", elapsedTime) +ENDRULE + + +RULE FSDataOutputStream.Close.Entry +CLASS ^FSDataOutputStream +METHOD close +AT ENTRY +IF TRUE +DO resetTimer(Thread.currentThread()); + incrementCounter("close.call") +ENDRULE + +RULE FSDataOutputStream.Close.Exit +CLASS ^FSDataOutputStream +METHOD close +AT EXIT +BIND elapsedTime = java.lang.Math.toIntExact(getElapsedTimeFromTimer(Thread.currentThread())) +IF TRUE +DO + incrementCounter("close.allTime", elapsedTime) +ENDRULE diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index de6f8bf171bf..e7a8ebb73c90 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-client - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Client Library Apache Hadoop HDDS Client jar diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 4adfa8521b07..c2743c4e4113 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -247,6 +247,36 @@ public ContainerCommandResponseProto sendCommand( } } + @Override + public Map + sendCommandOnAllNodes( + ContainerCommandRequestProto request) throws IOException { + 
HashMap + responseProtoHashMap = new HashMap<>(); + List datanodeList = pipeline.getNodes(); + HashMap> + futureHashMap = new HashMap<>(); + for (DatanodeDetails dn : datanodeList) { + try { + futureHashMap.put(dn, sendCommandAsync(request, dn).getResponse()); + } catch (InterruptedException e) { + LOG.error("Command execution was interrupted."); + } + } + try{ + for (Map.Entry > + entry : futureHashMap.entrySet()){ + responseProtoHashMap.put(entry.getKey(), entry.getValue().get()); + } + } catch (InterruptedException e) { + LOG.error("Command execution was interrupted."); + } catch (ExecutionException e) { + LOG.error("Failed to execute command " + request, e); + } + return responseProtoHashMap; + } + @Override public ContainerCommandResponseProto sendCommand( ContainerCommandRequestProto request, List validators) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index 1c7779b4aed6..23fca738a16d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -22,6 +22,7 @@ import java.security.cert.X509Certificate; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.OptionalLong; import java.util.UUID; @@ -352,4 +353,10 @@ public XceiverClientReply sendCommandAsync( return asyncReply; } + @Override + public Map + sendCommandOnAllNodes(ContainerCommandRequestProto request) { + throw new UnsupportedOperationException( + "Operation Not supported for ratis client"); + } } diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 1f0b3e61f73a..cc171f163776 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-common - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Common Apache Hadoop HDDS Common jar @@ -183,6 +183,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-interface-client + + org.hamcrest + hamcrest-all + test + diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.sh b/hadoop-hdds/common/src/main/conf/hadoop-env.sh index e43cd95b047e..51ee585ba5f5 100644 --- a/hadoop-hdds/common/src/main/conf/hadoop-env.sh +++ b/hadoop-hdds/common/src/main/conf/hadoop-env.sh @@ -30,6 +30,9 @@ ## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults ## +# Enable core dump when crash in C++ +ulimit -c unlimited + # Many of the options here are built from the perspective that users # may want to provide OVERWRITING values on the command line. 
# For example: diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index b077da2737bb..2c7f3d5db683 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -19,6 +19,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; +import java.util.ServiceLoader; import java.util.concurrent.Callable; import org.apache.hadoop.fs.Path; @@ -26,6 +27,7 @@ import com.google.common.annotations.VisibleForTesting; import picocli.CommandLine; +import picocli.CommandLine.Command; import picocli.CommandLine.ExecutionException; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Option; @@ -52,6 +54,25 @@ public GenericCli() { cmd = new CommandLine(this); } + public GenericCli(Class type) { + this(); + addSubcommands(getCmd(), type); + } + + private void addSubcommands(CommandLine cli, Class type) { + ServiceLoader registeredSubcommands = + ServiceLoader.load(SubcommandWithParent.class); + for (SubcommandWithParent subcommand : registeredSubcommands) { + if (subcommand.getParentType().equals(type)) { + final Command commandAnnotation = + subcommand.getClass().getAnnotation(Command.class); + CommandLine subcommandCommandLine = new CommandLine(subcommand); + addSubcommands(subcommandCommandLine, subcommand.getClass()); + cli.addSubcommand(commandAnnotation.name(), subcommandCommandLine); + } + } + } + /** * Handle the error when subcommand is required but not set. */ diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java similarity index 77% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java index 9852d50fc8c9..8421ab19cfaa 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java @@ -15,15 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdds.scm.cli.container; - -import org.apache.hadoop.hdds.scm.client.ScmClient; +package org.apache.hadoop.hdds.cli; /** - * Command which provides a SCM client based on the current config. + * Defineds parent command for SPI based subcommand registration. */ -public interface WithScmClient { +public interface SubcommandWithParent { - ScmClient createScmClient(); + /** + * Java type of the parent command. + */ + Class getParentType(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java index 8dcc1d1a3c91..aabad6f14464 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,4 +19,4 @@ /** * Generic helper class to make instantiate picocli based cli tools. */ -package org.apache.hadoop.hdds.cli; \ No newline at end of file +package org.apache.hadoop.hdds.cli; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 7fa77654ee7b..a3db139b96ce 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -49,6 +49,10 @@ public class DatanodeDetails extends NodeImpl implements private String hostName; private List ports; private String certSerialId; + private String version; + private long setupTime; + private String revision; + private String buildDate; /** * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used @@ -59,15 +63,25 @@ public class DatanodeDetails extends NodeImpl implements * @param networkLocation DataNode's network location path * @param ports Ports used by the DataNode * @param certSerialId serial id from SCM issued certificate. + * @param version DataNode's version + * @param setupTime the setup time of DataNode + * @param revision DataNodes's revision + * @param buildDate DataNodes's build timestamp */ + @SuppressWarnings("parameternumber") private DatanodeDetails(UUID uuid, String ipAddress, String hostName, - String networkLocation, List ports, String certSerialId) { + String networkLocation, List ports, String certSerialId, + String version, long setupTime, String revision, String buildDate) { super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); this.uuid = uuid; this.ipAddress = ipAddress; this.hostName = hostName; this.ports = ports; this.certSerialId = certSerialId; + this.version = version; + this.setupTime = setupTime; + this.revision = revision; + this.buildDate = buildDate; } public DatanodeDetails(DatanodeDetails datanodeDetails) { @@ -78,6 +92,11 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.hostName = datanodeDetails.hostName; this.ports = datanodeDetails.ports; this.setNetworkName(datanodeDetails.getNetworkName()); + this.setParent(datanodeDetails.getParent()); + this.version = datanodeDetails.version; + this.setupTime = datanodeDetails.setupTime; + this.revision = datanodeDetails.revision; + this.buildDate = datanodeDetails.buildDate; } /** @@ -206,6 +225,18 @@ public static DatanodeDetails getFromProtoBuf( if (datanodeDetailsProto.hasNetworkLocation()) { builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation()); } + if (datanodeDetailsProto.hasVersion()) { + builder.setVersion(datanodeDetailsProto.getVersion()); + } + if (datanodeDetailsProto.hasSetupTime()) { + builder.setSetupTime(datanodeDetailsProto.getSetupTime()); + } + if (datanodeDetailsProto.hasRevision()) { + builder.setRevision(datanodeDetailsProto.getRevision()); + } + if (datanodeDetailsProto.hasBuildDate()) { + builder.setBuildDate(datanodeDetailsProto.getBuildDate()); + } return builder.build(); } @@ -247,6 +278,20 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { .setValue(port.getValue()) .build()); } + + if (!Strings.isNullOrEmpty(getVersion())) { + builder.setVersion(getVersion()); + } + + builder.setSetupTime(getSetupTime()); + + if (!Strings.isNullOrEmpty(getRevision())) { + builder.setRevision(getRevision()); + } + if 
(!Strings.isNullOrEmpty(getBuildDate())) { + builder.setBuildDate(getBuildDate()); + } + return builder.build(); } @@ -299,6 +344,10 @@ public static final class Builder { private String networkLocation; private List ports; private String certSerialId; + private String version; + private long setupTime; + private String revision; + private String buildDate; /** * Default private constructor. To create Builder instance use @@ -387,6 +436,54 @@ public Builder setCertSerialId(String certId) { return this; } + /** + * Sets the DataNode version. + * + * @param ver the version of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setVersion(String ver) { + this.version = ver; + return this; + } + + /** + * Sets the DataNode revision. + * + * @param rev the revision of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setRevision(String rev) { + this.revision = rev; + return this; + } + + /** + * Sets the DataNode build date. + * + * @param date the build date of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setBuildDate(String date) { + this.buildDate = date; + return this; + } + + /** + * Sets the DataNode setup time. + * + * @param time the setup time of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setSetupTime(long time) { + this.setupTime = time; + return this; + } + /** * Builds and returns DatanodeDetails instance. * @@ -398,7 +495,8 @@ public DatanodeDetails build() { networkLocation = NetConstants.DEFAULT_RACK; } DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId); + networkLocation, ports, certSerialId, + version, setupTime, revision, buildDate); if (networkName != null) { dn.setNetworkName(networkName); } @@ -504,4 +602,76 @@ public String getCertSerialId() { public void setCertSerialId(String certSerialId) { this.certSerialId = certSerialId; } + + /** + * Returns the DataNode version. + * + * @return DataNode version + */ + public String getVersion() { + return version; + } + + /** + * Set DataNode version. + * + * @param version DataNode version + */ + public void setVersion(String version) { + this.version = version; + } + + /** + * Returns the DataNode setup time. + * + * @return DataNode setup time + */ + public long getSetupTime() { + return setupTime; + } + + /** + * Set DataNode setup time. + * + * @param setupTime DataNode setup time + */ + public void setSetupTime(long setupTime) { + this.setupTime = setupTime; + } + + /** + * Returns the DataNode revision. + * + * @return DataNode revision + */ + public String getRevision() { + return revision; + } + + /** + * Set DataNode revision. + * + * @param rev DataNode revision + */ + public void setRevision(String rev) { + this.revision = rev; + } + + /** + * Returns the DataNode build date. + * + * @return DataNode build date + */ + public String getBuildDate() { + return buildDate; + } + + /** + * Set DataNode build date. 
+ * + * @param date DataNode build date + */ + public void setBuildDate(String date) { + this.buildDate = date; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 47523bc79a8f..8325f0963885 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -25,13 +25,13 @@ import java.util.List; import java.util.Map; import java.util.UUID; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.ratis.retrypolicy.RetryPolicyCreator; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.x509.SecurityConfig; @@ -39,7 +39,6 @@ import org.apache.ratis.RaftConfigKeys; import org.apache.ratis.client.RaftClient; import org.apache.ratis.client.RaftClientConfigKeys; -import org.apache.ratis.client.retry.RequestTypeDependentRetryPolicy; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.grpc.GrpcConfigKeys; import org.apache.ratis.grpc.GrpcFactory; @@ -49,20 +48,10 @@ import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.protocol.GroupMismatchException; -import org.apache.ratis.protocol.StateMachineException; -import org.apache.ratis.protocol.NotReplicatedException; -import org.apache.ratis.protocol.TimeoutIOException; -import org.apache.ratis.protocol.exceptions.ResourceUnavailableException; -import org.apache.ratis.retry.ExponentialBackoffRetry; -import org.apache.ratis.retry.MultipleLinearRandomRetry; -import org.apache.ratis.retry.ExceptionDependentRetry; -import org.apache.ratis.retry.RetryPolicies; import org.apache.ratis.retry.RetryPolicy; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.util.TimeDuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,17 +64,6 @@ public final class RatisHelper { // Prefix for Ratis Server GRPC and Ratis client conf. public static final String HDDS_DATANODE_RATIS_PREFIX_KEY = "hdds.ratis"; - private static final String RAFT_SERVER_PREFIX_KEY = "raft.server"; - public static final String HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY = - HDDS_DATANODE_RATIS_PREFIX_KEY + "." + RAFT_SERVER_PREFIX_KEY; - public static final String HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY = - HDDS_DATANODE_RATIS_PREFIX_KEY + "." + RaftClientConfigKeys.PREFIX; - public static final String HDDS_DATANODE_RATIS_GRPC_PREFIX_KEY = - HDDS_DATANODE_RATIS_PREFIX_KEY + "." + GrpcConfigKeys.PREFIX; - - private static final Class[] NO_RETRY_EXCEPTIONS = - new Class[] {NotReplicatedException.class, GroupMismatchException.class, - StateMachineException.class}; /* TODO: use a dummy id for all groups for the moment. * It should be changed to a unique id for each group. 
@@ -177,9 +155,9 @@ private static RpcType getRpcType(ConfigurationSource conf) { } public static RaftClient newRaftClient(RaftPeer leader, - ConfigurationSource conf) { + ConfigurationSource conf, GrpcTlsConfig tlsConfig) { return newRaftClient(getRpcType(conf), leader, - RatisHelper.createRetryPolicy(conf), conf); + RatisHelper.createRetryPolicy(conf), tlsConfig, conf); } public static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, @@ -227,7 +205,7 @@ private static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader, } /** - * Set all the properties matching with regex + * Set all client properties matching with regex * {@link RatisHelper#HDDS_DATANODE_RATIS_PREFIX_KEY} in * ozone configuration object and configure it to RaftProperties. * @param ozoneConf @@ -237,23 +215,26 @@ public static void createRaftClientProperties(ConfigurationSource ozoneConf, RaftProperties raftProperties) { // As for client we do not require server and grpc server/tls. exclude them. - Map ratisClientConf = ozoneConf.getPropsWithPrefix( - StringUtils.appendIfNotPresent(HDDS_DATANODE_RATIS_PREFIX_KEY, '.')); + Map ratisClientConf = + getDatanodeRatisPrefixProps(ozoneConf); ratisClientConf.forEach((key, val) -> { - if (key.startsWith(RaftClientConfigKeys.PREFIX) || isGrpcClientConfig( - key)) { + if (isClientConfig(key) || isGrpcClientConfig(key)) { raftProperties.set(key, val); } }); } + private static boolean isClientConfig(String key) { + return key.startsWith(RaftClientConfigKeys.PREFIX); + } + private static boolean isGrpcClientConfig(String key) { return key.startsWith(GrpcConfigKeys.PREFIX) && !key .startsWith(GrpcConfigKeys.TLS.PREFIX) && !key .startsWith(GrpcConfigKeys.Server.PREFIX); } /** - * Set all the properties matching with prefix + * Set all server properties matching with prefix * {@link RatisHelper#HDDS_DATANODE_RATIS_PREFIX_KEY} in * ozone configuration object and configure it to RaftProperties. * @param ozoneConf @@ -266,7 +247,7 @@ public static void createRaftServerProperties(ConfigurationSource ozoneConf, getDatanodeRatisPrefixProps(ozoneConf); ratisServerConf.forEach((key, val) -> { // Exclude ratis client configuration. - if (!key.startsWith(RaftClientConfigKeys.PREFIX)) { + if (!isClientConfig(key)) { raftProperties.set(key, val); } }); @@ -291,81 +272,17 @@ public static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf, return tlsConfig; } - /** - * Table mapping exception type to retry policy used for the exception in - * write and watch request. 
- * --------------------------------------------------------------------------- - * | Exception | RetryPolicy for | RetryPolicy for | - * | | Write request | Watch request | - * |-------------------------------------------------------------------------| - * | NotReplicatedException | NO_RETRY | NO_RETRY | - * |-------------------------------------------------------------------------| - * | GroupMismatchException | NO_RETRY | NO_RETRY | - * |-------------------------------------------------------------------------| - * | StateMachineException | NO_RETRY | NO_RETRY | - * |-------------------------------------------------------------------------| - * | TimeoutIOException | EXPONENTIAL_BACKOFF | NO_RETRY | - * |-------------------------------------------------------------------------| - * | ResourceUnavailableException| EXPONENTIAL_BACKOFF | EXPONENTIAL_BACKOFF | - * |-------------------------------------------------------------------------| - * | Others | MULTILINEAR_RANDOM | MULTILINEAR_RANDOM | - * | | _RETRY | _RETRY | - * --------------------------------------------------------------------------- - */ public static RetryPolicy createRetryPolicy(ConfigurationSource conf) { - RatisClientConfig ratisClientConfig = conf - .getObject(RatisClientConfig.class); - ExponentialBackoffRetry exponentialBackoffRetry = - createExponentialBackoffPolicy(ratisClientConfig); - MultipleLinearRandomRetry multipleLinearRandomRetry = - MultipleLinearRandomRetry - .parseCommaSeparated(ratisClientConfig.getMultilinearPolicy()); - - long writeTimeout = ratisClientConfig.getWriteRequestTimeoutInMs(); - long watchTimeout = ratisClientConfig.getWatchRequestTimeoutInMs(); - - return RequestTypeDependentRetryPolicy.newBuilder() - .setRetryPolicy(RaftProtos.RaftClientRequestProto.TypeCase.WRITE, - createExceptionDependentPolicy(exponentialBackoffRetry, - multipleLinearRandomRetry, exponentialBackoffRetry)) - .setRetryPolicy(RaftProtos.RaftClientRequestProto.TypeCase.WATCH, - createExceptionDependentPolicy(exponentialBackoffRetry, - multipleLinearRandomRetry, RetryPolicies.noRetry())) - .setTimeout(RaftProtos.RaftClientRequestProto.TypeCase.WRITE, - TimeDuration.valueOf(writeTimeout, TimeUnit.MILLISECONDS)) - .setTimeout(RaftProtos.RaftClientRequestProto.TypeCase.WATCH, - TimeDuration.valueOf(watchTimeout, TimeUnit.MILLISECONDS)) - .build(); - } - - private static ExponentialBackoffRetry createExponentialBackoffPolicy( - RatisClientConfig ratisClientConfig) { - long exponentialBaseSleep = - ratisClientConfig.getExponentialPolicyBaseSleepInMs(); - long exponentialMaxSleep = - ratisClientConfig.getExponentialPolicyMaxSleepInMs(); - return ExponentialBackoffRetry.newBuilder() - .setBaseSleepTime( - TimeDuration.valueOf(exponentialBaseSleep, TimeUnit.MILLISECONDS)) - .setMaxSleepTime( - TimeDuration.valueOf(exponentialMaxSleep, TimeUnit.MILLISECONDS)) - .build(); - } - - private static ExceptionDependentRetry createExceptionDependentPolicy( - ExponentialBackoffRetry exponentialBackoffRetry, - MultipleLinearRandomRetry multipleLinearRandomRetry, - RetryPolicy timeoutPolicy) { - ExceptionDependentRetry.Builder builder = - ExceptionDependentRetry.newBuilder(); - for (Class c : NO_RETRY_EXCEPTIONS) { - builder.setExceptionToPolicy(c, RetryPolicies.noRetry()); + try { + RatisClientConfig scmClientConfig = + conf.getObject(RatisClientConfig.class); + Class policyClass = getClass( + scmClientConfig.getRetryPolicy(), + RetryPolicyCreator.class); + return policyClass.newInstance().create(conf); + } catch (Exception e) { + throw new 
RuntimeException(e); } - return builder.setExceptionToPolicy(ResourceUnavailableException.class, - exponentialBackoffRetry) - .setExceptionToPolicy(TimeoutIOException.class, timeoutPolicy) - .setDefaultPolicy(multipleLinearRandomRetry) - .build(); } public static Long getMinReplicatedIndex( @@ -373,4 +290,18 @@ public static Long getMinReplicatedIndex( return commitInfos.stream().map(RaftProtos.CommitInfoProto::getCommitIndex) .min(Long::compareTo).orElse(null); } + + private static Class getClass(String name, + Class xface) { + try { + Class theClass = Class.forName(name); + if (!xface.isAssignableFrom(theClass)) { + throw new RuntimeException(theClass + " not " + xface.getName()); + } else { + return theClass.asSubclass(xface); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java index 18d174d193b8..7db60598e10d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/conf/RatisClientConfig.java @@ -23,13 +23,15 @@ import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.ratis.client.RaftClientConfigKeys; +import java.time.Duration; + import static org.apache.hadoop.hdds.conf.ConfigTag.CLIENT; import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE; /** * Configuration related to Ratis Client. This is the config used in creating - * RaftClient creation. + * RaftClient. */ @ConfigGroup(prefix = RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY) public class RatisClientConfig { @@ -38,7 +40,9 @@ public class RatisClientConfig { * Configurations which will be set in RaftProperties. RaftProperties is a * configuration object for Ratis client. */ - @ConfigGroup(prefix = RaftClientConfigKeys.PREFIX) + @ConfigGroup(prefix = + RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY + "." + + RaftClientConfigKeys.PREFIX) public static class RaftConfig { @Config(key = "async.outstanding-requests.max", defaultValue = "32", @@ -47,7 +51,7 @@ public static class RaftConfig { description = "Controls the maximum number of outstanding async requests that can" + " be handled by the Standalone as well as Ratis client.") - private int maxOutstandingRequests; + private int maxOutstandingRequests = 32; public int getMaxOutstandingRequests() { return maxOutstandingRequests; @@ -65,14 +69,14 @@ public void setMaxOutstandingRequests(int maxOutstandingRequests) { "The timeout duration for ratis client request (except " + "for watch request). It should be set greater than leader " + "election timeout in Ratis.") - private long rpcRequestTimeout = 60 * 1000; + private long rpcRequestTimeout = Duration.ofSeconds(60).toMillis(); - public long getRpcRequestTimeout() { - return rpcRequestTimeout; + public Duration getRpcRequestTimeout() { + return Duration.ofMillis(rpcRequestTimeout); } - public void setRpcRequestTimeout(long rpcRequestTimeout) { - this.rpcRequestTimeout = rpcRequestTimeout; + public void setRpcRequestTimeout(Duration duration) { + this.rpcRequestTimeout = duration.toMillis(); } @Config(key = "rpc.watch.request.timeout", @@ -83,14 +87,14 @@ public void setRpcRequestTimeout(long rpcRequestTimeout) { "The timeout duration for ratis client watch request. 
" + "Timeout for the watch API in Ratis client to acknowledge a " + "particular request getting replayed to all servers.") - private long rpcWatchRequestTimeout = 180 * 1000; + private long rpcWatchRequestTimeout = Duration.ofSeconds(180).toMillis(); - public long getRpcWatchRequestTimeout() { - return rpcWatchRequestTimeout; + public Duration getRpcWatchRequestTimeout() { + return Duration.ofMillis(rpcWatchRequestTimeout); } - public void setRpcWatchRequestTimeout(long rpcWatchRequestTimeout) { - this.rpcWatchRequestTimeout = rpcWatchRequestTimeout; + public void setRpcWatchRequestTimeout(Duration duration) { + this.rpcWatchRequestTimeout = duration.toMillis(); } } @@ -99,14 +103,15 @@ public void setRpcWatchRequestTimeout(long rpcWatchRequestTimeout) { type = ConfigType.TIME, tags = { OZONE, CLIENT, PERFORMANCE }, description = "Timeout for ratis client write request.") - private long writeRequestTimeoutInMs; + private long writeRequestTimeoutInMs = + Duration.ofMinutes(5).toMillis(); - public long getWriteRequestTimeoutInMs() { - return writeRequestTimeoutInMs; + public Duration getWriteRequestTimeout() { + return Duration.ofMillis(writeRequestTimeoutInMs); } - public void setWriteRequestTimeoutInMs(long writeRequestTimeOut) { - this.writeRequestTimeoutInMs = writeRequestTimeOut; + public void setWriteRequestTimeout(Duration duration) { + writeRequestTimeoutInMs = duration.toMillis(); } @Config(key = "client.request.watch.timeout", @@ -114,14 +119,14 @@ public void setWriteRequestTimeoutInMs(long writeRequestTimeOut) { type = ConfigType.TIME, tags = { OZONE, CLIENT, PERFORMANCE }, description = "Timeout for ratis client watch request.") - private long watchRequestTimeoutInMs; + private long watchRequestTimeoutInMs = Duration.ofMinutes(3).toMillis(); - public long getWatchRequestTimeoutInMs() { - return watchRequestTimeoutInMs; + public Duration getWatchRequestTimeout() { + return Duration.ofMillis(watchRequestTimeoutInMs); } - public void setWatchRequestTimeoutInMs(long watchRequestTimeoutInMs) { - this.watchRequestTimeoutInMs = watchRequestTimeoutInMs; + public void setWatchRequestTimeout(Duration duration) { + watchRequestTimeoutInMs = duration.toMillis(); } @Config(key = "client.multilinear.random.retry.policy", @@ -151,15 +156,15 @@ public void setMultilinearPolicy(String multilinearPolicy) { + " With the default base sleep of 4s, the sleep duration for ith" + " retry is min(4 * pow(2, i), max_sleep) * r, where r is " + "random number in the range [0.5, 1.5).") - private long exponentialPolicyBaseSleepInMs; + private long exponentialPolicyBaseSleepInMs = + Duration.ofSeconds(4).toMillis(); - public long getExponentialPolicyBaseSleepInMs() { - return exponentialPolicyBaseSleepInMs; + public Duration getExponentialPolicyBaseSleep() { + return Duration.ofMillis(exponentialPolicyBaseSleepInMs); } - public void setExponentialPolicyBaseSleepInMs( - long exponentialPolicyBaseSleepInMs) { - this.exponentialPolicyBaseSleepInMs = exponentialPolicyBaseSleepInMs; + public void setExponentialPolicyBaseSleep(Duration duration) { + exponentialPolicyBaseSleepInMs = duration.toMillis(); } @Config(key = "client.exponential.backoff.max.sleep", @@ -170,14 +175,49 @@ public void setExponentialPolicyBaseSleepInMs( + "policy is limited by the configured max sleep. 
Refer " + "dfs.ratis.client.exponential.backoff.base.sleep for further " + "details.") - private long exponentialPolicyMaxSleepInMs; + private long exponentialPolicyMaxSleepInMs = + Duration.ofSeconds(40).toMillis(); + + public Duration getExponentialPolicyMaxSleep() { + return Duration.ofMillis(exponentialPolicyMaxSleepInMs); + } + + public void setExponentialPolicyMaxSleep(Duration duration) { + exponentialPolicyMaxSleepInMs = duration.toMillis(); + } + + @Config(key = "client.retrylimited.retry.interval", + defaultValue = "1s", + type = ConfigType.TIME, + tags = { OZONE, CLIENT, PERFORMANCE }, + description = "Interval between successive retries for " + + "a ratis client request.") + private long retrylimitedRetryInterval; - public long getExponentialPolicyMaxSleepInMs() { - return exponentialPolicyMaxSleepInMs; + public long getRetrylimitedRetryInterval() { + return retrylimitedRetryInterval; } - public void setExponentialPolicyMaxSleepInMs( - long exponentialPolicyMaxSleepInMs) { - this.exponentialPolicyMaxSleepInMs = exponentialPolicyMaxSleepInMs; + @Config(key = "client.retrylimited.max.retries", + defaultValue = "180", + type = ConfigType.INT, + tags = { OZONE, CLIENT, PERFORMANCE }, + description = "Number of retries for ratis client request.") + private int retrylimitedMaxRetries; + + public int getRetrylimitedMaxRetries() { + return retrylimitedMaxRetries; + } + + @Config(key = "client.retry.policy", + defaultValue = "org.apache.hadoop.hdds.ratis.retrypolicy." + + "RequestTypeDependentRetryPolicyCreator", + type = ConfigType.STRING, + tags = { OZONE, CLIENT, PERFORMANCE }, + description = "The class name of the policy for retry.") + private String retryPolicy; + + public String getRetryPolicy() { + return retryPolicy; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java new file mode 100644 index 000000000000..fe92f3217b10 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.ratis.retrypolicy; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.ratis.client.retry.RequestTypeDependentRetryPolicy; +import org.apache.ratis.proto.RaftProtos; +import org.apache.ratis.protocol.GroupMismatchException; +import org.apache.ratis.protocol.NotReplicatedException; +import org.apache.ratis.protocol.StateMachineException; +import org.apache.ratis.protocol.TimeoutIOException; +import org.apache.ratis.protocol.exceptions.ResourceUnavailableException; +import org.apache.ratis.retry.*; +import org.apache.ratis.util.TimeDuration; + +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +/** + * Table mapping exception type to retry policy used for the exception in + * write and watch request. + * --------------------------------------------------------------------------- + * | Exception | RetryPolicy for | RetryPolicy for | + * | | Write request | Watch request | + * |-------------------------------------------------------------------------| + * | NotReplicatedException | NO_RETRY | NO_RETRY | + * |-------------------------------------------------------------------------| + * | GroupMismatchException | NO_RETRY | NO_RETRY | + * |-------------------------------------------------------------------------| + * | StateMachineException | NO_RETRY | NO_RETRY | + * |-------------------------------------------------------------------------| + * | TimeoutIOException | EXPONENTIAL_BACKOFF | NO_RETRY | + * |-------------------------------------------------------------------------| + * | ResourceUnavailableException| EXPONENTIAL_BACKOFF | EXPONENTIAL_BACKOFF | + * |-------------------------------------------------------------------------| + * | Others | MULTILINEAR_RANDOM | MULTILINEAR_RANDOM | + * | | _RETRY | _RETRY | + * --------------------------------------------------------------------------- + */ +public class RequestTypeDependentRetryPolicyCreator + implements RetryPolicyCreator { + + private static final Class[] NO_RETRY_EXCEPTIONS = + new Class[] {NotReplicatedException.class, GroupMismatchException.class, + StateMachineException.class}; + + @Override + public RetryPolicy create(ConfigurationSource conf) { + RatisClientConfig ratisClientConfig = conf + .getObject(RatisClientConfig.class); + ExponentialBackoffRetry exponentialBackoffRetry = + createExponentialBackoffPolicy(ratisClientConfig); + MultipleLinearRandomRetry multipleLinearRandomRetry = + MultipleLinearRandomRetry + .parseCommaSeparated(ratisClientConfig.getMultilinearPolicy()); + + return RequestTypeDependentRetryPolicy.newBuilder() + .setRetryPolicy(RaftProtos.RaftClientRequestProto.TypeCase.WRITE, + createExceptionDependentPolicy(exponentialBackoffRetry, + multipleLinearRandomRetry, exponentialBackoffRetry)) + .setRetryPolicy(RaftProtos.RaftClientRequestProto.TypeCase.WATCH, + createExceptionDependentPolicy(exponentialBackoffRetry, + multipleLinearRandomRetry, RetryPolicies.noRetry())) + .setTimeout(RaftProtos.RaftClientRequestProto.TypeCase.WRITE, + toTimeDuration(ratisClientConfig.getWriteRequestTimeout())) + .setTimeout(RaftProtos.RaftClientRequestProto.TypeCase.WATCH, + toTimeDuration(ratisClientConfig.getWatchRequestTimeout())) + .build(); + } + + private static ExponentialBackoffRetry createExponentialBackoffPolicy( + RatisClientConfig ratisClientConfig) { + return ExponentialBackoffRetry.newBuilder() + .setBaseSleepTime( + 
toTimeDuration(ratisClientConfig.getExponentialPolicyBaseSleep())) + .setMaxSleepTime( + toTimeDuration(ratisClientConfig.getExponentialPolicyMaxSleep())) + .build(); + } + + private static ExceptionDependentRetry createExceptionDependentPolicy( + ExponentialBackoffRetry exponentialBackoffRetry, + MultipleLinearRandomRetry multipleLinearRandomRetry, + RetryPolicy timeoutPolicy) { + ExceptionDependentRetry.Builder builder = + ExceptionDependentRetry.newBuilder(); + for (Class c : NO_RETRY_EXCEPTIONS) { + builder.setExceptionToPolicy(c, RetryPolicies.noRetry()); + } + return builder.setExceptionToPolicy(ResourceUnavailableException.class, + exponentialBackoffRetry) + .setExceptionToPolicy(TimeoutIOException.class, timeoutPolicy) + .setDefaultPolicy(multipleLinearRandomRetry) + .build(); + } + private static TimeDuration toTimeDuration(Duration duration) { + return toTimeDuration(duration.toMillis()); + } + + private static TimeDuration toTimeDuration(long milliseconds) { + return TimeDuration.valueOf(milliseconds, TimeUnit.MILLISECONDS); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryLimitedPolicyCreator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryLimitedPolicyCreator.java new file mode 100644 index 000000000000..5c3b06a17191 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryLimitedPolicyCreator.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.ratis.retrypolicy; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.ratis.retry.RetryPolicies; +import org.apache.ratis.retry.RetryPolicy; +import org.apache.ratis.util.TimeDuration; + +import java.util.concurrent.TimeUnit; + +/** + * The creator of RetryLimited policy. 
+ */ +public class RetryLimitedPolicyCreator implements RetryPolicyCreator { + + @Override + public RetryPolicy create(ConfigurationSource conf) { + RatisClientConfig scmClientConfig = + conf.getObject(RatisClientConfig.class); + int maxRetryCount = + scmClientConfig.getRetrylimitedMaxRetries(); + long retryInterval = scmClientConfig.getRetrylimitedRetryInterval(); + TimeDuration sleepDuration = + TimeDuration.valueOf(retryInterval, TimeUnit.MILLISECONDS); + RetryPolicy retryPolicy = RetryPolicies + .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration); + return retryPolicy; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryPolicyCreator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryPolicyCreator.java new file mode 100644 index 000000000000..8057baa02013 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RetryPolicyCreator.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.ratis.retrypolicy; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.ratis.retry.RetryPolicy; + +/** + * The interface of RetryLimited policy creator. + */ +public interface RetryPolicyCreator { + RetryPolicy create(ConfigurationSource conf); +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/package-info.java new file mode 100644 index 000000000000..657a2bfb3cf7 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.ratis.retrypolicy; + +/** + * This package contains classes related to retry policies. 
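Taken together, the new `client.retry.policy` setting names a `RetryPolicyCreator` implementation, and the creators in this package turn a `ConfigurationSource` into a Ratis `RetryPolicy`. Below is a minimal sketch of how a caller might bridge the two, assuming the creator is loaded reflectively through a no-arg constructor; the factory class and its name are illustrative and not part of this patch.

```
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
import org.apache.hadoop.hdds.ratis.retrypolicy.RetryPolicyCreator;
import org.apache.ratis.retry.RetryPolicy;

/** Hypothetical helper, shown only to illustrate the creator contract. */
public final class RetryPolicyFactory {
  private RetryPolicyFactory() {
  }

  public static RetryPolicy createRetryPolicy(ConfigurationSource conf) {
    // The config value is a fully qualified RetryPolicyCreator class name,
    // e.g. RequestTypeDependentRetryPolicyCreator by default.
    String creatorClassName =
        conf.getObject(RatisClientConfig.class).getRetryPolicy();
    try {
      Class<? extends RetryPolicyCreator> creatorClass =
          Class.forName(creatorClassName).asSubclass(RetryPolicyCreator.class);
      // Assumes creators expose a public no-arg constructor.
      return creatorClass.getDeclaredConstructor().newInstance().create(conf);
    } catch (Exception e) {
      throw new IllegalArgumentException(
          "Cannot create retry policy from " + creatorClassName, e);
    }
  }
}
```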
+ */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java new file mode 100644 index 000000000000..c829e2eab651 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; + +import java.util.List; + +/** + * A {@link PipelineChoosePolicy} support choosing pipeline from exist list. + */ +public interface PipelineChoosePolicy { + + /** + * Given an initial list of pipelines, return one of the pipelines. + * + * @param pipelineList list of pipelines. + * @return one of the pipelines. + */ + Pipeline choosePipeline(List pipelineList, + PipelineRequestInformation pri); +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java new file mode 100644 index 000000000000..ac0cfbe57beb --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm; + +/** + * The information of the request of pipeline. + */ +public final class PipelineRequestInformation { + private long size; + + /** + * Builder for PipelineRequestInformation. + */ + public static class Builder { + private long size; + + public static Builder getBuilder() { + return new Builder(); + } + + /** + * sets the size. 
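The `PipelineChoosePolicy` contract above hands an implementation the list of candidate pipelines plus a `PipelineRequestInformation` describing the request (currently just its size, set through the builder defined next). The sketch below shows what a trivial implementation could look like, assuming the declared signature is `choosePipeline(List<Pipeline>, PipelineRequestInformation)`; it is illustrative only and is not the `RandomPipelineChoosePolicy` that the SCM configuration below names as its default.

```
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

/** Illustrative policy: ignores the request and picks a pipeline at random. */
public class SimpleRandomPipelineChoosePolicy implements PipelineChoosePolicy {
  @Override
  public Pipeline choosePipeline(List<Pipeline> pipelineList,
      PipelineRequestInformation pri) {
    // Assumes the caller passes a non-empty list of healthy pipelines.
    return pipelineList.get(
        ThreadLocalRandom.current().nextInt(pipelineList.size()));
  }
}
```

A caller would typically build the request side with `PipelineRequestInformation.Builder.getBuilder().setSize(requestedSize).build()` before invoking the policy.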
+ * @param sz request size + * @return Builder for PipelineRequestInformation + */ + public Builder setSize(long sz) { + this.size = sz; + return this; + } + + public PipelineRequestInformation build() { + return new PipelineRequestInformation(size); + } + } + + private PipelineRequestInformation(long size) { + this.size = size; + } + + public long getSize() { + return size; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java index 73701ea55c9d..3084bb4cc396 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java @@ -59,6 +59,21 @@ public class ScmConfig { ) private String action; + @Config(key = "pipeline.choose.policy.impl", + type = ConfigType.STRING, + defaultValue = "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms" + + ".RandomPipelineChoosePolicy", + tags = { ConfigTag.SCM, ConfigTag.PIPELINE }, + description = + "The full name of class which implements " + + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. " + + "The class decides which pipeline will be used to find or " + + "allocate container. If not set, " + + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms. " + + "RandomPipelineChoosePolicy will be used as default value." + ) + private String pipelineChoosePolicyName; + public void setKerberosPrincipal(String kerberosPrincipal) { this.principal = kerberosPrincipal; } @@ -72,6 +87,10 @@ public void setUnknownContainerAction(String unknownContainerAction) { this.action = unknownContainerAction; } + public void setPipelineChoosePolicyName(String pipelineChoosePolicyName) { + this.pipelineChoosePolicyName = pipelineChoosePolicyName; + } + public String getKerberosPrincipal() { return this.principal; } @@ -84,6 +103,10 @@ public String getUnknownContainerAction() { return this.action; } + public String getPipelineChoosePolicyName() { + return pipelineChoosePolicyName; + } + /** * Configuration strings class. 
* required for SCMSecurityProtocol where the KerberosInfo references diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 2efd82b62977..4e624c695889 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -44,9 +44,11 @@ public final class ScmConfigKeys { = "dfs.container.ratis.rpc.type"; public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; - public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY - = "dfs.container.ratis.num.write.chunk.threads"; - public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT + public static final String + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME + = "dfs.container.ratis.num.write.chunk.threads.per.volume"; + public static final int + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY = "dfs.container.ratis.replication.level"; @@ -287,6 +289,7 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT = "ozone.scm.pipeline.owner.container.count"; public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3; + // Pipeline placement policy: // Upper limit for how many pipelines a datanode can engage in. public static final String OZONE_DATANODE_PIPELINE_LIMIT = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java index 328777799bd1..1c7d1f6408d8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java @@ -21,10 +21,12 @@ import java.io.Closeable; import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -186,4 +188,13 @@ public abstract XceiverClientReply watchForCommit(long index) * @return min commit index replicated to all servers. */ public abstract long getReplicatedMinCommitIndex(); + + /** + * Sends command to all nodes in the pipeline. 
+ * @return a map containing datanode as the key and + * the command response from that datanode + */ + public abstract Map + sendCommandOnAllNodes(ContainerCommandRequestProto request) + throws IOException, InterruptedException; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java index db1f82ae411d..0146eaed6e33 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java @@ -122,6 +122,7 @@ public enum ResultCodes { FAILED_TO_FIND_ACTIVE_PIPELINE, FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY, FAILED_TO_ALLOCATE_ENOUGH_BLOCKS, - INTERNAL_ERROR + INTERNAL_ERROR, + FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index 6b0d8f8cda2a..11acf82ff32f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.storage; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.common.helpers @@ -73,7 +74,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.ExecutionException; /** @@ -578,4 +581,35 @@ public static List getValidatorList() { validators.add(validator); return validators; } + + public static HashMap + getBlockFromAllNodes( + XceiverClientSpi xceiverClient, + DatanodeBlockID datanodeBlockID) throws IOException, + InterruptedException { + GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto + .newBuilder() + .setBlockID(datanodeBlockID); + HashMap datanodeToResponseMap + = new HashMap<>(); + String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); + ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto + .newBuilder() + .setCmdType(Type.GetBlock) + .setContainerID(datanodeBlockID.getContainerID()) + .setDatanodeUuid(id) + .setGetBlock(readBlockRequest); + String encodedToken = getEncodedBlockToken(getService(datanodeBlockID)); + if (encodedToken != null) { + builder.setEncodedToken(encodedToken); + } + ContainerCommandRequestProto request = builder.build(); + Map responses = + xceiverClient.sendCommandOnAllNodes(request); + for(Map.Entry entry: + responses.entrySet()){ + datanodeToResponseMap.put(entry.getKey(), entry.getValue().getGetBlock()); + } + return datanodeToResponseMap; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 7d46b01a6dbf..482ac88f366c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -79,15 +79,6 @@ public final class OzoneConfigKeys { "ozone.trace.enabled"; public static final boolean OZONE_TRACE_ENABLED_DEFAULT = 
false; - public static final String OZONE_METADATA_STORE_IMPL = - "ozone.metastore.impl"; - public static final String OZONE_METADATA_STORE_IMPL_LEVELDB = - "LevelDB"; - public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB = - "RocksDB"; - public static final String OZONE_METADATA_STORE_IMPL_DEFAULT = - OZONE_METADATA_STORE_IMPL_ROCKSDB; - public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = "ozone.metastore.rocksdb.statistics"; @@ -104,6 +95,9 @@ public final class OzoneConfigKeys { public static final String OZONE_CONTAINER_CACHE_SIZE = "ozone.container.cache.size"; public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024; + public static final String OZONE_CONTAINER_CACHE_LOCK_STRIPES = + "ozone.container.cache.lock.stripes"; + public static final int OZONE_CONTAINER_CACHE_LOCK_STRIPES_DEFAULT = 1024; public static final String OZONE_SCM_BLOCK_SIZE = "ozone.scm.block.size"; @@ -248,10 +242,13 @@ public final class OzoneConfigKeys { = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT; + public static final String + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY + = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; + public static final int + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + = ScmConfigKeys. + DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; public static final ReplicationLevel @@ -471,6 +468,7 @@ public final class OzoneConfigKeys { public static final String OZONE_CLIENT_HTTPS_NEED_AUTH_KEY = "ozone.https.client.need-auth"; public static final boolean OZONE_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false; + /** * There is no need to instantiate this class. 
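With `XceiverClientSpi#sendCommandOnAllNodes` in place, `ContainerProtocolCalls.getBlockFromAllNodes` above collects one `GetBlock` response per datanode in the pipeline, which makes it easy to compare replicas of the same block. A hedged usage sketch follows, assuming the generic types implied by the new method (a map keyed by `DatanodeDetails`) and an already-acquired client and block ID.

```
import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

/** Illustrative helper; not part of this patch. */
final class BlockReplicaProbe {
  private BlockReplicaProbe() {
  }

  static void printBlockReplicas(XceiverClientSpi client,
      ContainerProtos.DatanodeBlockID blockID)
      throws IOException, InterruptedException {
    Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto> replies =
        ContainerProtocolCalls.getBlockFromAllNodes(client, blockID);
    for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> e
        : replies.entrySet()) {
      // Each replica reports its own view of the block metadata.
      System.out.println(e.getKey().getHostName()
          + " => block size " + e.getValue().getBlockData().getSize());
    }
  }
}
```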
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index a6833a5ee990..9854d40494be 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -293,9 +293,15 @@ private OzoneConsts() { public static final String MAX_PARTS = "maxParts"; public static final String S3_BUCKET = "s3Bucket"; public static final String S3_GETSECRET_USER = "S3GetSecretUser"; + public static final String RENAMED_KEYS_MAP = "renamedKeysMap"; + public static final String UNRENAMED_KEYS_MAP = "unRenamedKeysMap"; public static final String MULTIPART_UPLOAD_PART_NUMBER = "partNumber"; public static final String MULTIPART_UPLOAD_PART_NAME = "partName"; public static final String BUCKET_ENCRYPTION_KEY = "bucketEncryptionKey"; + public static final String DELETED_KEYS_LIST = "deletedKeysList"; + public static final String UNDELETED_KEYS_LIST = "unDeletedKeysList"; + public static final String SOURCE_VOLUME = "sourceVolume"; + public static final String SOURCE_BUCKET = "sourceBucket"; @@ -360,4 +366,9 @@ private OzoneConsts() { public static final String TRANSACTION_INFO_KEY = "#TRANSACTIONINFO"; public static final String TRANSACTION_INFO_SPLIT_KEY = "#"; + public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB"; + public static final String CONTAINER_DB_TYPE_LEVELDB = "LevelDB"; + + // An on-disk transient marker file used when replacing DB with checkpoint + public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java index 25f8e8630035..d5e1348027a0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java @@ -168,7 +168,7 @@ public File getCurrentDir() { * * @return the version file path */ - private File getVersionFile() { + public File getVersionFile() { return new File(getCurrentDir(), STORAGE_FILE_VERSION); } diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 8b724d9818e7..577044815579 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -37,6 +37,16 @@ size of that cache. + + ozone.container.cache.lock.stripes + 1024 + PERFORMANCE, CONTAINER, STORAGE + Container DB open is an exclusive operation. We use a stripe + lock to guarantee that different threads can open different container DBs + concurrently, while for one container DB, only one thread can open it at + the same time. This setting controls the lock stripes. + + dfs.container.ipc 9859 @@ -196,7 +206,7 @@ - dfs.container.ratis.num.write.chunk.threads + dfs.container.ratis.num.write.chunk.threads.per.volume 10 OZONE, RATIS, PERFORMANCE Maximum number of threads in the thread pool that Datanode @@ -634,17 +644,6 @@ dfs.container.ratis.datanode.storage.dir be configured separately. - - ozone.metastore.impl - RocksDB - OZONE, OM, SCM, CONTAINER, STORAGE - - Ozone metadata store implementation. Ozone metadata are well - distributed to multiple services such as ozoneManager, scm. They are stored in - some local key-value databases. 
This property determines which database - library to use. Supported value is either LevelDB or RocksDB. - - ozone.metastore.rocksdb.statistics @@ -819,7 +818,7 @@ ozone.scm.pipeline.owner.container.count 3 OZONE, SCM, PIPELINE - Number of containers per owner in a pipeline. + Number of containers per owner per disk in a pipeline. @@ -2291,15 +2290,6 @@ Whether to enable topology aware read to improve the read performance. - - ozone.recon.container.db.impl - RocksDB - OZONE, RECON, STORAGE - - Ozone Recon container DB store implementation.Supported value is either - LevelDB or RocksDB. - - ozone.recon.om.db.dir @@ -2441,6 +2431,14 @@ information will be extracted + + ozone.s3g.client.buffer.size + OZONE, S3GATEWAY + 4KB + + The size of the buffer which is for read block. (4KB by default). + + ssl.server.keystore.keypassword OZONE, SECURITY, MANAGEMENT @@ -2498,4 +2496,19 @@ rules in Amazon S3's object key naming guide. + + + ozone.om.enable.filesystem.paths + OZONE, OM + false + If true, key names will be interpreted as file system paths. + "/" will be treated as a special character and paths will be normalized + and must follow Unix filesystem path naming conventions. This flag will + be helpful when objects created by S3G need to be accessed using OFS/O3Fs. + If false, it will fallback to default behavior of Key/MPU create + requests where key paths are not normalized and any intermediate + directories will not be created or any file checks happens to check + filesystem semantics. + + diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/conf/TestRaftClientConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/conf/TestRaftClientConfig.java new file mode 100644 index 000000000000..46edc2005cf5 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/conf/TestRaftClientConfig.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.ratis.conf; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.junit.jupiter.api.Test; + +import java.time.Duration; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Tests for {@link RatisClientConfig.RaftConfig}. + */ +class TestRaftClientConfig { + + @Test + void defaults() { + RatisClientConfig.RaftConfig subject = new RatisClientConfig.RaftConfig(); + RatisClientConfig.RaftConfig fromConfig = + new OzoneConfiguration().getObject(RatisClientConfig.RaftConfig.class); + + assertEquals(fromConfig.getMaxOutstandingRequests(), + subject.getMaxOutstandingRequests()); + assertEquals(fromConfig.getRpcRequestTimeout(), + subject.getRpcRequestTimeout()); + assertEquals(fromConfig.getRpcWatchRequestTimeout(), + subject.getRpcWatchRequestTimeout()); + } + + @Test + void setAndGet() { + RatisClientConfig.RaftConfig subject = new RatisClientConfig.RaftConfig(); + final int maxOutstandingRequests = 42; + final Duration rpcRequestTimeout = Duration.ofMillis(12313); + final Duration rpcWatchRequestTimeout = Duration.ofSeconds(99); + + subject.setMaxOutstandingRequests(maxOutstandingRequests); + subject.setRpcRequestTimeout(rpcRequestTimeout); + subject.setRpcWatchRequestTimeout(rpcWatchRequestTimeout); + + assertEquals(maxOutstandingRequests, subject.getMaxOutstandingRequests()); + assertEquals(rpcRequestTimeout, subject.getRpcRequestTimeout()); + assertEquals(rpcWatchRequestTimeout, subject.getRpcWatchRequestTimeout()); + } + +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/conf/TestRatisClientConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/conf/TestRatisClientConfig.java new file mode 100644 index 000000000000..2d16af2a0cbd --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/conf/TestRatisClientConfig.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.ratis.conf; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.junit.jupiter.api.Test; + +import java.time.Duration; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Tests for {@link RatisClientConfig}. + */ +class TestRatisClientConfig { + + @Test + void defaults() { + RatisClientConfig fromConfig = new OzoneConfiguration() + .getObject(RatisClientConfig.class); + + RatisClientConfig subject = new RatisClientConfig(); + + assertEquals(fromConfig.getExponentialPolicyBaseSleep(), + subject.getExponentialPolicyBaseSleep()); + assertEquals(fromConfig.getExponentialPolicyMaxSleep(), + subject.getExponentialPolicyMaxSleep()); + assertEquals(fromConfig.getWatchRequestTimeout(), + subject.getWatchRequestTimeout()); + assertEquals(fromConfig.getWriteRequestTimeout(), + subject.getWriteRequestTimeout()); + } + + @Test + void setAndGet() { + RatisClientConfig subject = new RatisClientConfig(); + final Duration baseSleep = Duration.ofSeconds(12); + final Duration maxSleep = Duration.ofMinutes(2); + final Duration watchRequestTimeout = Duration.ofMillis(555); + final Duration writeRequestTimeout = Duration.ofMillis(444); + + subject.setExponentialPolicyBaseSleep(baseSleep); + subject.setExponentialPolicyMaxSleep(maxSleep); + subject.setWatchRequestTimeout(watchRequestTimeout); + subject.setWriteRequestTimeout(writeRequestTimeout); + + assertEquals(baseSleep, subject.getExponentialPolicyBaseSleep()); + assertEquals(maxSleep, subject.getExponentialPolicyMaxSleep()); + assertEquals(watchRequestTimeout, subject.getWatchRequestTimeout()); + assertEquals(writeRequestTimeout, subject.getWriteRequestTimeout()); + } + +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java index 55ba6ab72fa7..30cdc62c5dc3 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java @@ -25,13 +25,20 @@ import java.io.File; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.audit.AuditEventStatus.FAILURE; import static org.apache.hadoop.ozone.audit.AuditEventStatus.SUCCESS; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.StringContains.containsString; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import org.hamcrest.Matcher; +import org.hamcrest.collection.IsIterableContainingInOrder; + /** * Test Ozone Audit Logger. @@ -143,7 +150,35 @@ public void notLogReadEvents() throws IOException { verifyNoLog(); } - private void verifyLog(String expected) throws IOException { + /** + * Test to verify if multiline entries can be checked. 
+ */ + + @Test + public void messageIncludesMultilineException() throws IOException { + String exceptionMessage = "Dummy exception message"; + TestException testException = new TestException(exceptionMessage); + AuditMessage exceptionAuditMessage = + new AuditMessage.Builder() + .setUser(USER) + .atIp(IP_ADDRESS) + .forOperation(DummyAction.CREATE_VOLUME) + .withParams(PARAMS) + .withResult(FAILURE) + .withException(testException).build(); + AUDIT.logWriteFailure(exceptionAuditMessage); + verifyLog( + "ERROR | OMAudit | user=john | " + + "ip=192.168.0.1 | op=CREATE_VOLUME " + + "{key1=value1, key2=value2} | ret=FAILURE", + "org.apache.hadoop.ozone.audit." + + "TestOzoneAuditLogger$TestException: Dummy exception message", + "at org.apache.hadoop.ozone.audit.TestOzoneAuditLogger" + + ".messageIncludesMultilineException" + + "(TestOzoneAuditLogger.java"); + } + + private void verifyLog(String... expectedStrings) throws IOException { File file = new File("audit.log"); List lines = FileUtils.readLines(file, (String)null); final int retry = 5; @@ -158,11 +193,11 @@ private void verifyLog(String expected) throws IOException { } i++; } - - // When log entry is expected, the log file will contain one line and - // that must be equal to the expected string - assertTrue(lines.size() != 0); - assertTrue(expected.equalsIgnoreCase(lines.get(0))); + //check if every expected string can be found in the log entry + assertThat( + lines.subList(0, expectedStrings.length), + containsInOrder(expectedStrings) + ); //empty the file lines.clear(); FileUtils.writeLines(file, lines, false); @@ -174,4 +209,19 @@ private void verifyNoLog() throws IOException { // When no log entry is expected, the log file must be empty assertEquals(0, lines.size()); } + + private class TestException extends Exception{ + TestException(String message) { + super(message); + } + } + + private Matcher> containsInOrder( + String[] expectedStrings) { + return IsIterableContainingInOrder.contains( + Arrays.stream(expectedStrings) + .map(str -> containsString(str)) + .collect(Collectors.toList()) + ); + } } diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 105e8ac8f096..98ac44df7899 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-config - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Config Tools Apache Hadoop HDDS Config jar diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/InMemoryConfiguration.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/InMemoryConfiguration.java new file mode 100644 index 000000000000..0bea7af82a49 --- /dev/null +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/InMemoryConfiguration.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +/** + * In memory, mutable configuration source for testing. + */ +public class InMemoryConfiguration implements MutableConfigurationSource { + + private Map configs = new HashMap<>(); + + public InMemoryConfiguration() { + } + + public InMemoryConfiguration(String key, String value) { + set(key, value); + } + + @Override + public String get(String key) { + return configs.get(key); + } + + @Override + public Collection getConfigKeys() { + return configs.keySet(); + } + + @Override + public char[] getPassword(String key) throws IOException { + return configs.get(key).toCharArray(); + } + + @Override + public void set(String key, String value) { + configs.put(key, value); + } +} diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index d10d2a358ce1..b71f8e3471e7 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-container-service - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Container Service Apache Hadoop HDDS Container Service jar @@ -70,6 +70,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.yaml snakeyaml + + + org.apache.hadoop + hadoop-hdds-docs + provided + + com.github.spotbugs spotbugs @@ -107,6 +114,37 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/dev-support/findbugsExcludeFile.xml + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-common-html + prepare-package + + unpack + + + + + org.apache.hadoop + hadoop-hdds-server-framework + ${project.build.outputDirectory} + + webapps/static/**/*.* + + + org.apache.hadoop + hadoop-hdds-docs + ${project.build.outputDirectory}/webapps/hddsDatanode + docs/**/*.* + + + true + + + + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java new file mode 100644 index 000000000000..d36fcdb6fc70 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.server.ServiceRuntimeInfo; + +/** + * This is the JMX management interface for DN information. + */ +@InterfaceAudience.Private +public interface DNMXBean extends ServiceRuntimeInfo { +} diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java similarity index 64% rename from hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java index 0837200c1fac..18ad66ce5a69 100644 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java @@ -15,23 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.upgrade; -import java.util.concurrent.Callable; +package org.apache.hadoop.ozone; -import picocli.CommandLine.Command; +import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; +import org.apache.hadoop.hdds.utils.VersionInfo; /** - * Execute Ozone specific HDFS ballanced.. + * This is the JMX management class for DN information. */ -@Command(name = "execute", - description = "Start/restart upgrade from HDFS to Ozone cluster.") -public class Execute implements Callable { - - @Override - public Void call() throws Exception { - System.err.println("In-Place upgrade : execute] is not yet supported."); - return null; +public class DNMXBeanImpl extends ServiceRuntimeInfoImpl implements DNMXBean { + public DNMXBeanImpl( + VersionInfo versionInfo) { + super(versionInfo); } - } \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 7e896e715598..cfb22e30dcd2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -23,12 +23,14 @@ import java.security.KeyPair; import java.security.cert.CertificateException; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.ConcurrentHashMap; +import com.sun.jmx.mbeanserver.Introspector; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsUtils; @@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.server.http.RatisDropwizardExports; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.HddsVersionInfo; +import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; 
import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; @@ -65,11 +68,15 @@ import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString; import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY; import static org.apache.hadoop.util.ExitUtil.terminate; + +import org.apache.hadoop.util.Time; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; +import javax.management.ObjectName; + /** * Datanode service plugin to start the HDDS container services. */ @@ -92,9 +99,13 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { private HddsDatanodeHttpServer httpServer; private boolean printBanner; private String[] args; - private volatile AtomicBoolean isStopped = new AtomicBoolean(false); + private final AtomicBoolean isStopped = new AtomicBoolean(false); private final Map ratisMetricsMap = new ConcurrentHashMap<>(); + private DNMXBeanImpl serviceRuntimeInfo = + new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) {}; + private ObjectName dnInfoBeanName; + //Constructor for DataNode PluginService public HddsDatanodeService(){} @@ -132,6 +143,7 @@ private static HddsDatanodeService createHddsDatanodeService( public static void main(String[] args) { try { + Introspector.checkCompliance(DNMXBeanImpl.class); HddsDatanodeService hddsDatanodeService = createHddsDatanodeService(args, true); hddsDatanodeService.run(args); @@ -180,6 +192,8 @@ public void start(OzoneConfiguration configuration) { } public void start() { + serviceRuntimeInfo.setStartTime(); + RatisDropwizardExports. registerRatisMetricReporters(ratisMetricsMap); @@ -191,6 +205,12 @@ public void start() { datanodeDetails = initializeDatanodeDetails(); datanodeDetails.setHostName(hostname); datanodeDetails.setIpAddress(ip); + datanodeDetails.setVersion( + HddsVersionInfo.HDDS_VERSION_INFO.getVersion()); + datanodeDetails.setSetupTime(Time.now()); + datanodeDetails.setRevision( + HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); + datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); TracingUtil.initTracing( "HddsDatanodeService." + datanodeDetails.getUuidString() .substring(0, 8), conf); @@ -245,7 +265,7 @@ public void start() { .equalsIgnoreCase(System.getenv("OZONE_DATANODE_STANDALONE_TEST"))) { startRatisForTest(); } - + registerMXBean(); } catch (IOException e) { throw new RuntimeException("Can't start the HDDS datanode plugin", e); } catch (AuthenticationException ex) { @@ -332,9 +352,15 @@ private void getSCMSignedCert(OzoneConfiguration config) { dnCertClient.storeCertificate(pemEncodedCert, true); dnCertClient.storeCertificate(response.getX509CACertificate(), true, true); - datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert). - getSerialNumber().toString()); + String dnCertSerialId = getX509Certificate(pemEncodedCert). + getSerialNumber().toString(); + datanodeDetails.setCertSerialId(dnCertSerialId); persistDatanodeDetails(datanodeDetails); + // Rebuild dnCertClient with the new CSR result so that the default + // certSerialId and the x509Certificate can be updated. 
+ dnCertClient = new DNCertificateClient( + new SecurityConfig(config), dnCertSerialId); + } else { throw new RuntimeException("Unable to retrieve datanode certificate " + "chain"); @@ -345,6 +371,21 @@ private void getSCMSignedCert(OzoneConfiguration config) { } } + private void registerMXBean() { + Map jmxProperties = new HashMap<>(); + jmxProperties.put("component", "ServerRuntime"); + this.dnInfoBeanName = HddsUtils.registerWithJmxProperties( + "HddsDatanodeService", + "HddsDatanodeServiceInfo", jmxProperties, this.serviceRuntimeInfo); + } + + private void unregisterMXBean() { + if (this.dnInfoBeanName != null) { + MBeans.unregister(this.dnInfoBeanName); + this.dnInfoBeanName = null; + } + } + /** * Creates CSR for DN. * @param config @@ -490,8 +531,7 @@ public void terminateDatanode() { @Override public void stop() { - if (!isStopped.get()) { - isStopped.set(true); + if (!isStopped.getAndSet(true)) { if (plugins != null) { for (ServicePlugin plugin : plugins) { try { @@ -512,6 +552,7 @@ public void stop() { LOG.error("Stopping HttpServer is failed.", e); } } + unregisterMXBean(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 061d09bd4a5e..2ad7f0d49817 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java @@ -88,11 +88,14 @@ public StorageType getStorageType() { return storageType; } + private StorageTypeProto getStorageTypeProto() throws IOException { + return getStorageTypeProto(getStorageType()); + } - private StorageTypeProto getStorageTypeProto() throws - IOException { + public static StorageTypeProto getStorageTypeProto(StorageType type) + throws IOException { StorageTypeProto storageTypeProto; - switch (getStorageType()) { + switch (type) { case SSD: storageTypeProto = StorageTypeProto.SSD; break; @@ -145,7 +148,7 @@ private static StorageType getStorageType(StorageTypeProto proto) throws * @return SCMStorageReport * @throws IOException In case, the storage type specified is invalid. 
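`registerMXBean()` above publishes the datanode's `ServiceRuntimeInfo` under the `HddsDatanodeService` / `HddsDatanodeServiceInfo` names with a `component=ServerRuntime` property. A hedged sketch of reading the bean back in-process follows; the `Hadoop:` domain and key layout are an assumption about how `HddsUtils.registerWithJmxProperties` composes the `ObjectName`, and the attribute name comes from the standard `ServiceRuntimeInfo` interface.

```
import java.lang.management.ManagementFactory;
import java.util.Set;

import javax.management.MBeanServer;
import javax.management.ObjectName;

/** Illustrative probe; not part of this patch. */
public final class DnRuntimeInfoProbe {
  private DnRuntimeInfoProbe() {
  }

  public static void printStartedTime() throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Match on service/name and leave the remaining properties as a wildcard.
    Set<ObjectName> names = server.queryNames(new ObjectName(
        "Hadoop:service=HddsDatanodeService,name=HddsDatanodeServiceInfo,*"),
        null);
    for (ObjectName name : names) {
      // ServiceRuntimeInfo exposes attributes such as StartedTimeInMillis.
      System.out.println(name + " StartedTimeInMillis="
          + server.getAttribute(name, "StartedTimeInMillis"));
    }
  }
}
```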
*/ - public StorageReportProto getProtoBufMessage() throws IOException{ + public StorageReportProto getProtoBufMessage() throws IOException { StorageReportProto.Builder srb = StorageReportProto.newBuilder(); return srb.setStorageUuid(getId()) .setCapacity(getCapacity()) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java index 41fc26716c19..2cee75c00fe8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java @@ -45,7 +45,7 @@ public class TopNOrderedContainerDeletionChoosingPolicy private static final Comparator KEY_VALUE_CONTAINER_DATA_COMPARATOR = (KeyValueContainerData c1, KeyValueContainerData c2) -> - Integer.compare(c2.getNumPendingDeletionBlocks(), + Long.compare(c2.getNumPendingDeletionBlocks(), c1.getNumPendingDeletionBlocks()); @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 779b60a1d816..425074d6888a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -19,11 +19,13 @@ import java.io.Closeable; import java.io.IOException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; @@ -50,7 +52,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -103,9 +104,10 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, this.hddsDatanodeStopService = hddsDatanodeStopService; this.conf = conf; this.datanodeDetails = datanodeDetails; - executorService = HadoopExecutors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Datanode State Machine Thread - %d").build()); + executorService = Executors.newFixedThreadPool( + getEndPointTaskThreadPoolSize(), + new ThreadFactoryBuilder() + .setNameFormat("Datanode State Machine Task Thread - %d").build()); connectionManager = new SCMConnectionManager(conf); context = new StateContext(this.conf, DatanodeStates.getInitState(), this); // OzoneContainer instance is used in a non-thread safe way by the context @@ -124,7 +126,9 @@ public 
DatanodeStateMachine(DatanodeDetails datanodeDetails, ContainerReplicator replicator = new DownloadAndImportReplicator(container.getContainerSet(), container.getController(), - new SimpleContainerDownloader(conf), new TarContainerPacker()); + new SimpleContainerDownloader(conf, + dnCertClient != null ? dnCertClient.getCACertificate() : null), + new TarContainerPacker()); supervisor = new ReplicationSupervisor(container.getContainerSet(), replicator, @@ -155,6 +159,21 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, .build(); } + private int getEndPointTaskThreadPoolSize() { + // TODO(runzhiwang): current only support one recon, if support multiple + // recon in future reconServerCount should be the real number of recon + int reconServerCount = 1; + int totalServerCount = reconServerCount; + + try { + totalServerCount += HddsUtils.getSCMAddresses(conf).size(); + } catch (Exception e) { + LOG.error("Fail to get scm addresses", e); + } + + return totalServerCount; + } + /** * * Return DatanodeDetails if set, return null otherwise. @@ -207,19 +226,26 @@ private void start() throws IOException { nextHB.set(Time.monotonicNow() + heartbeatFrequency); context.execute(executorService, heartbeatFrequency, TimeUnit.MILLISECONDS); - now = Time.monotonicNow(); - if (now < nextHB.get()) { - if(!Thread.interrupted()) { - Thread.sleep(nextHB.get() - now); - } - } } catch (InterruptedException e) { // Some one has sent interrupt signal, this could be because // 1. Trigger heartbeat immediately // 2. Shutdown has be initiated. + LOG.warn("Interrupt the execution.", e); + Thread.currentThread().interrupt(); } catch (Exception e) { LOG.error("Unable to finish the execution.", e); } + + now = Time.monotonicNow(); + if (now < nextHB.get()) { + if(!Thread.interrupted()) { + try { + Thread.sleep(nextHB.get() - now); + } catch (InterruptedException e) { + LOG.warn("Interrupt the execution.", e); + } + } + } } // If we have got some exception in stateMachine we set the state to diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index f3a599d1f21e..4cd769f4d245 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; @@ -35,6 +36,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.Consumer; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction; @@ -51,6 +53,8 @@ import com.google.protobuf.GeneratedMessage; import static java.lang.Math.min; import org.apache.commons.collections.CollectionUtils; + +import static org.apache.hadoop.hdds.utils.HddsServerUtil.getLogWarnInterval; import static 
org.apache.hadoop.hdds.utils.HddsServerUtil.getScmHeartbeatInterval; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,6 +78,7 @@ public class StateContext { private DatanodeStateMachine.DatanodeStates state; private boolean shutdownOnError = false; private boolean shutdownGracefully = false; + private final AtomicLong threadPoolNotAvailableCount; /** * Starting with a 2 sec heartbeat frequency which will be updated to the @@ -103,6 +108,7 @@ public StateContext(ConfigurationSource conf, pipelineActions = new HashMap<>(); lock = new ReentrantLock(); stateExecutionCount = new AtomicLong(0); + threadPoolNotAvailableCount = new AtomicLong(0); } /** @@ -393,6 +399,20 @@ public DatanodeState getTask() { } } + @VisibleForTesting + public boolean isThreadPoolAvailable(ExecutorService executor) { + if (!(executor instanceof ThreadPoolExecutor)) { + return true; + } + + ThreadPoolExecutor ex = (ThreadPoolExecutor) executor; + if (ex.getQueue().size() == 0) { + return true; + } + + return false; + } + /** * Executes the required state function. * @@ -415,6 +435,17 @@ public void execute(ExecutorService service, long time, TimeUnit unit) if (this.isEntering()) { task.onEnter(); } + + if (!isThreadPoolAvailable(service)) { + long count = threadPoolNotAvailableCount.getAndIncrement(); + if (count % getLogWarnInterval(conf) == 0) { + LOG.warn("No available thread in pool for past {} seconds.", + unit.toSeconds(time) * (count + 1)); + } + return; + } + + threadPoolNotAvailableCount.set(0); task.execute(service); DatanodeStateMachine.DatanodeStates newState = task.await(time, unit); if (this.state != newState) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java index c60c1129f563..78059fee78f3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java @@ -98,7 +98,8 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, d -> !d.getUuid().equals(dn.getUuid())) .forEach(d -> { final RaftPeer peer = RatisHelper.toRaftPeer(d); - try (RaftClient client = RatisHelper.newRaftClient(peer, conf)) { + try (RaftClient client = RatisHelper.newRaftClient(peer, conf, + ozoneContainer.getTlsClientConfig())) { client.groupAdd(group, peer.getId()); } catch (AlreadyExistsException ae) { // do not log diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 64cc804f0004..bd5e7b7c467c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -95,7 +95,6 @@ public void handle(SCMCommand command, OzoneContainer container, return; } LOG.debug("Processing block deletion command."); - 
invocationCount++; // move blocks to deleting state. // this is a metadata update, the actual deletion happens in another @@ -177,6 +176,7 @@ public void handle(SCMCommand command, OzoneContainer container, updateCommandStatus(context, command, statusUpdater, LOG); long endTime = Time.monotonicNow(); totalTime += endTime - startTime; + invocationCount++; } } @@ -253,28 +253,30 @@ private void deleteKeyValueContainerBlocks( } } - // Finally commit the DB counters. - BatchOperation batchOperation = new BatchOperation(); + if (newDeletionBlocks > 0) { + // Finally commit the DB counters. + BatchOperation batchOperation = new BatchOperation(); - // In memory is updated only when existing delete transactionID is - // greater. - if (delTX.getTxID() > containerData.getDeleteTransactionId()) { - // Update in DB pending delete key count and delete transaction ID. - batchOperation.put(DB_CONTAINER_DELETE_TRANSACTION_KEY, - Longs.toByteArray(delTX.getTxID())); - } + // In memory is updated only when existing delete transactionID is + // greater. + if (delTX.getTxID() > containerData.getDeleteTransactionId()) { + // Update in DB pending delete key count and delete transaction ID. + batchOperation.put(DB_CONTAINER_DELETE_TRANSACTION_KEY, + Longs.toByteArray(delTX.getTxID())); + } - batchOperation.put(DB_PENDING_DELETE_BLOCK_COUNT_KEY, Longs.toByteArray( - containerData.getNumPendingDeletionBlocks() + newDeletionBlocks)); + batchOperation.put(DB_PENDING_DELETE_BLOCK_COUNT_KEY, Longs.toByteArray( + containerData.getNumPendingDeletionBlocks() + newDeletionBlocks)); - containerDB.getStore().writeBatch(batchOperation); + containerDB.getStore().writeBatch(batchOperation); - // update pending deletion blocks count and delete transaction ID in - // in-memory container status - containerData.updateDeleteTransactionId(delTX.getTxID()); + // update pending deletion blocks count and delete transaction ID in + // in-memory container status + containerData.updateDeleteTransactionId(delTX.getTxID()); - containerData.incrPendingDeletionBlocks(newDeletionBlocks); + containerData.incrPendingDeletionBlocks(newDeletionBlocks); + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java index 8a9bcaff3afb..b0cfb4ce001a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.container.common.states.datanode; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; @@ -42,7 +43,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; /** * Class that implements handshake with SCM. 
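The isThreadPoolAvailable check added to StateContext in the hunks above skips a heartbeat round (and logs a rate-limited warning) whenever endpoint tasks are still queued, instead of piling more tasks behind them. A minimal, self-contained sketch of the same queue-emptiness check against a plain ThreadPoolExecutor; this is an illustration only, not code from the patch:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

public class PoolAvailabilityCheck {
  // Mirrors StateContext#isThreadPoolAvailable: the pool counts as busy
  // only when tasks are already waiting in its work queue.
  static boolean isThreadPoolAvailable(ExecutorService executor) {
    if (!(executor instanceof ThreadPoolExecutor)) {
      return true;
    }
    return ((ThreadPoolExecutor) executor).getQueue().isEmpty();
  }

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(1);
    pool.submit(() -> sleep(500));                     // occupies the only worker
    pool.submit(() -> sleep(500));                     // waits in the queue
    System.out.println(isThreadPoolAvailable(pool));   // false while a task is queued
    pool.shutdown();
  }

  private static void sleep(long ms) {
    try {
      Thread.sleep(ms);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}
```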
@@ -152,6 +152,11 @@ public void execute(ExecutorService executor) { } } + @VisibleForTesting + public void setExecutorCompletionService(ExecutorCompletionService e) { + this.ecs = e; + } + private Callable getEndPointTask( EndpointStateMachine endpoint) { if (endpointTasks.containsKey(endpoint)) { @@ -200,10 +205,11 @@ private Callable getEndPointTask( @Override public DatanodeStateMachine.DatanodeStates await(long duration, TimeUnit timeUnit) - throws InterruptedException, ExecutionException, TimeoutException { + throws InterruptedException { int count = connectionManager.getValues().size(); int returned = 0; - long timeLeft = timeUnit.toMillis(duration); + long durationMS = timeUnit.toMillis(duration); + long timeLeft = durationMS; long startTime = Time.monotonicNow(); List> results = new LinkedList<>(); @@ -214,7 +220,7 @@ private Callable getEndPointTask( results.add(result); returned++; } - timeLeft = timeLeft - (Time.monotonicNow() - startTime); + timeLeft = durationMS - (Time.monotonicNow() - startTime); } return computeNextContainerState(results); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java index 01f463c5cddd..d8dfefdb5f64 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java @@ -24,7 +24,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReport; - +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import java.io.IOException; import java.util.Collection; import java.util.List; @@ -83,4 +84,13 @@ default void removeGroup(HddsProtos.PipelineID pipelineId) * @return list of report for each pipeline. */ List getPipelineReport(); + + /** + * Get storage report for the XceiverServer instance. + * @return list of report for each storage location. + */ + default List getStorageReport() throws + IOException { + return null; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index b1c8370a48d6..840d87ca6d8d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -103,7 +103,7 @@ * processed in 2 phases. The 2 phases are divided in * {@link #startTransaction(RaftClientRequest)}, in the first phase the user * data is written directly into the state machine via - * {@link #writeStateMachineData} and in the second phase the + * {@link #write} and in the second phase the * transaction is committed via {@link #applyTransaction(TransactionContext)} * * For the requests with no stateMachine data, the transaction is directly @@ -115,7 +115,7 @@ * the write chunk operation will fail otherwise as the container still hasn't * been created. 
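The corrected await loop in RunningDatanodeState derives the remaining wait from the original duration on every iteration; the old code subtracted the elapsed time from an already reduced timeLeft, shrinking the deadline too fast. A rough sketch of the fixed pattern with an ExecutorCompletionService; the task bodies are invented for the example:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class DeadlineAwait {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    ExecutorCompletionService<String> ecs = new ExecutorCompletionService<>(pool);
    ecs.submit(() -> "endpoint-1");
    ecs.submit(() -> { Thread.sleep(100); return "endpoint-2"; });

    int expected = 2;
    long durationMs = TimeUnit.SECONDS.toMillis(2);
    long start = System.nanoTime();
    long timeLeft = durationMs;
    List<Future<String>> results = new ArrayList<>();

    // Recompute timeLeft from the full duration each round so the overall
    // deadline holds, instead of shrinking it again by the time already spent.
    while (results.size() < expected && timeLeft > 0) {
      Future<String> result = ecs.poll(timeLeft, TimeUnit.MILLISECONDS);
      if (result != null) {
        results.add(result);
      }
      timeLeft = durationMs
          - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    }
    System.out.println("collected " + results.size() + " endpoint results");
    pool.shutdown();
  }
}
```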
Hence the create container operation has been split in the * {@link #startTransaction(RaftClientRequest)}, this will help in synchronizing - * the calls in {@link #writeStateMachineData} + * the calls in {@link #write} * * 2) Write chunk commit operation is executed after write chunk state machine * operation. This will ensure that commit operation is sync'd with the state @@ -517,7 +517,7 @@ private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { * and also with applyTransaction. */ @Override - public CompletableFuture writeStateMachineData(LogEntryProto entry) { + public CompletableFuture write(LogEntryProto entry) { try { metrics.incNumWriteStateMachineOps(); long writeStateMachineStartTime = Time.monotonicNowNanos(); @@ -618,7 +618,7 @@ private ByteString readStateMachineData( * @return Combined future of all writeChunks till the log index given. */ @Override - public CompletableFuture flushStateMachineData(long index) { + public CompletableFuture flush(long index) { List> futureList = writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) .map(Map.Entry::getValue).collect(Collectors.toList()); @@ -632,7 +632,7 @@ public CompletableFuture flushStateMachineData(long index) { * evicted. */ @Override - public CompletableFuture readStateMachineData( + public CompletableFuture read( LogEntryProto entry) { StateMachineLogEntryProto smLogEntryProto = entry.getStateMachineLogEntry(); metrics.incNumReadStateMachineOps(); @@ -833,7 +833,7 @@ private static CompletableFuture completeExceptionally(Exception e) { } @Override - public CompletableFuture truncateStateMachineData(long index) { + public CompletableFuture truncate(long index) { stateMachineDataCache.removeIf(k -> k >= index); return CompletableFuture.completedFuture(null); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 53fa2d8e5263..c1d8df66e318 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -30,6 +29,7 @@ import java.util.Objects; import java.util.Set; import java.util.UUID; +import java.util.EnumMap; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; @@ -46,6 +46,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; @@ -59,11 +60,14 @@ import 
org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; @@ -136,6 +140,11 @@ private static long nextCallId() { // Timeout used while calling submitRequest directly. private long requestTimeout; + /** + * Maintains a list of active volumes per StorageType. + */ + private EnumMap> ratisVolumeMap; + private XceiverServerRatis(DatanodeDetails dd, int port, ContainerDispatcher dispatcher, ContainerController containerController, StateContext context, GrpcTlsConfig tlsConfig, ConfigurationSource conf) @@ -163,6 +172,7 @@ private XceiverServerRatis(DatanodeDetails dd, int port, HddsConfigKeys.HDDS_DATANODE_RATIS_SERVER_REQUEST_TIMEOUT, HddsConfigKeys.HDDS_DATANODE_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); + initializeRatisVolumeMap(); } private ContainerStateMachine getStateMachine(RaftGroupId gid) { @@ -213,9 +223,12 @@ private RaftProperties newRaftProperties() { setNodeFailureTimeout(properties); // Set the ratis storage directory - String storageDir = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf); - RaftServerConfigKeys.setStorageDir(properties, - Collections.singletonList(new File(storageDir))); + Collection storageDirPaths = + HddsServerUtil.getOzoneDatanodeRatisDirectory(conf); + List storageDirs= new ArrayList<>(storageDirPaths.size()); + storageDirPaths.stream().forEach(d -> storageDirs.add(new File(d))); + + RaftServerConfigKeys.setStorageDir(properties, storageDirs); // For grpc set the maximum message size GrpcConfigKeys.setMessageSizeMax(properties, @@ -409,12 +422,14 @@ public static XceiverServerRatis newXceiverServerRatis( // In summary: // authenticate from server to client is via TLS. // authenticate from client to server is via block token (or container token). + // DN Ratis server act as both SSL client and server and we must pass TLS + // configuration for both. static GrpcTlsConfig createTlsServerConfigForDN(SecurityConfig conf, CertificateClient caClient) { if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) { return new GrpcTlsConfig( caClient.getPrivateKey(), caClient.getCertificate(), - null, false); + caClient.getCACertificate(), false); } return null; } @@ -526,6 +541,43 @@ public void submitRequest(ContainerCommandRequestProto request, } } + private void initializeRatisVolumeMap() throws IOException { + ratisVolumeMap = new EnumMap<>(StorageType.class); + Collection rawLocations = HddsServerUtil. + getOzoneDatanodeRatisDirectory(conf); + + for (String locationString : rawLocations) { + try { + StorageLocation location = StorageLocation.parse(locationString); + StorageType type = location.getStorageType(); + ratisVolumeMap.computeIfAbsent(type, k -> new ArrayList(1)); + ratisVolumeMap.get(location.getStorageType()). 
+ add(location.getUri().getPath()); + + } catch (IOException e) { + LOG.error("Failed to parse the storage location: " + + locationString, e); + } + } + } + + @Override + public List getStorageReport() + throws IOException { + List reportProto = new ArrayList<>(); + for (StorageType storageType : ratisVolumeMap.keySet()) { + for (String path : ratisVolumeMap.get(storageType)) { + MetadataStorageReportProto.Builder builder = MetadataStorageReportProto. + newBuilder(); + builder.setStorageLocation(path); + builder.setStorageType(StorageLocationReport. + getStorageTypeProto(storageType)); + reportProto.add(builder.build()); + } + } + return reportProto; + } + private RaftClientRequest createRaftClientRequest( ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID, RaftClientRequest.Type type) { @@ -552,6 +604,14 @@ private void handlePipelineFailure(RaftGroupId groupId, msg = datanode + " is in candidate state for " + roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms"; break; + case FOLLOWER: + msg = datanode + " closes pipeline when installSnapshot from leader " + + "because leader snapshot doesn't contain any data to replay, " + + "all the log entries prior to the snapshot might have been purged." + + "So follower should not try to install snapshot from leader but" + + "can close the pipeline here. It's in follower state for " + + roleInfoProto.getRoleElapsedTimeMs() + "ms"; + break; case LEADER: StringBuilder sb = new StringBuilder(); sb.append(datanode).append(" has not seen follower/s"); @@ -782,8 +842,10 @@ private static List createChunkExecutors( ConfigurationSource conf) { // TODO create single pool with N threads if using non-incremental chunks final int threadCountPerDisk = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT); + OzoneConfigKeys + .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + OzoneConfigKeys + .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = MutableVolumeSet.getDatanodeStorageDirs(conf).size(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index d2d29018b32a..f4d8f43f7065 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -23,6 +23,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import com.google.common.util.concurrent.Striped; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.MetadataStore; import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; @@ -43,12 +44,14 @@ public final class ContainerCache extends LRUMap { private final Lock lock = new ReentrantLock(); private static ContainerCache cache; private static final float LOAD_FACTOR = 0.75f; + private final Striped rocksDBLock; /** * Constructs a cache that holds DBHandle references. 
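initializeRatisVolumeMap above groups the configured Ratis metadata directories into one bucket per StorageType, which getStorageReport then walks to build the metadata storage reports. A small stand-alone sketch of that grouping; the StorageType enum and the parsed locations below are simplified stand-ins, not the Hadoop classes:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

public class VolumesByStorageType {
  // Simplified stand-in for org.apache.hadoop.fs.StorageType.
  enum StorageType { DISK, SSD }

  static final class Location {
    final StorageType type;
    final String path;
    Location(StorageType type, String path) {
      this.type = type;
      this.path = path;
    }
  }

  public static void main(String[] args) {
    List<Location> locations = Arrays.asList(
        new Location(StorageType.SSD, "/data/ssd1/ratis"),
        new Location(StorageType.DISK, "/data/disk1/ratis"),
        new Location(StorageType.SSD, "/data/ssd2/ratis"));

    // One bucket per storage type, like ratisVolumeMap in XceiverServerRatis.
    Map<StorageType, List<String>> volumeMap = new EnumMap<>(StorageType.class);
    for (Location loc : locations) {
      volumeMap.computeIfAbsent(loc.type, k -> new ArrayList<>()).add(loc.path);
    }
    // Prints {DISK=[/data/disk1/ratis], SSD=[/data/ssd1/ratis, /data/ssd2/ratis]}
    System.out.println(volumeMap);
  }
}
```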
*/ - private ContainerCache(int maxSize, float loadFactor, boolean + private ContainerCache(int maxSize, int stripes, float loadFactor, boolean scanUntilRemovable) { super(maxSize, loadFactor, scanUntilRemovable); + rocksDBLock = Striped.lazyWeakLock(stripes); } /** @@ -63,7 +66,10 @@ public synchronized static ContainerCache getInstance( if (cache == null) { int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT); - cache = new ContainerCache(cacheSize, LOAD_FACTOR, true); + int stripes = conf.getInt( + OzoneConfigKeys.OZONE_CONTAINER_CACHE_LOCK_STRIPES, + OzoneConfigKeys.OZONE_CONTAINER_CACHE_LOCK_STRIPES_DEFAULT); + cache = new ContainerCache(cacheSize, stripes, LOAD_FACTOR, true); } return cache; } @@ -117,30 +123,57 @@ public ReferenceCountedDB getDB(long containerID, String containerDBType, throws IOException { Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); - lock.lock(); + ReferenceCountedDB db; + Lock containerLock = rocksDBLock.get(containerDBPath); + containerLock.lock(); try { - ReferenceCountedDB db = (ReferenceCountedDB) this.get(containerDBPath); + lock.lock(); + try { + db = (ReferenceCountedDB) this.get(containerDBPath); + if (db != null) { + db.incrementReference(); + return db; + } + } finally { + lock.unlock(); + } - if (db == null) { + try { MetadataStore metadataStore = MetadataStoreBuilder.newBuilder() - .setDbFile(new File(containerDBPath)) - .setCreateIfMissing(false) - .setConf(conf) - .setDBType(containerDBType) - .build(); + .setDbFile(new File(containerDBPath)) + .setCreateIfMissing(false) + .setConf(conf) + .setDBType(containerDBType) + .build(); db = new ReferenceCountedDB(metadataStore, containerDBPath); - this.put(containerDBPath, db); + } catch (Exception e) { + LOG.error("Error opening DB. Container:{} ContainerPath:{}", + containerID, containerDBPath, e); + throw e; + } + + lock.lock(); + try { + ReferenceCountedDB currentDB = + (ReferenceCountedDB) this.get(containerDBPath); + if (currentDB != null) { + // increment the reference before returning the object + currentDB.incrementReference(); + // clean the db created in previous step + db.cleanup(); + return currentDB; + } else { + this.put(containerDBPath, db); + // increment the reference before returning the object + db.incrementReference(); + return db; + } + } finally { + lock.unlock(); } - // increment the reference before returning the object - db.incrementReference(); - return db; - } catch (Exception e) { - LOG.error("Error opening DB. 
Container:{} ContainerPath:{}", - containerID, containerDBPath, e); - throw e; } finally { - lock.unlock(); + containerLock.unlock(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index bc61811c868b..b8c606738ef1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -36,8 +36,6 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.common.InconsistentStorageStateException; import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; @@ -487,7 +485,7 @@ public Map> getVolumeStateMap() { return ImmutableMap.copyOf(volumeStateMap); } - public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() + public StorageLocationReport[] getStorageReport() throws IOException { boolean failed; this.readLock(); @@ -540,11 +538,7 @@ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() StorageLocationReport r = builder.build(); reports[counter++] = r; } - NodeReportProto.Builder nrb = NodeReportProto.newBuilder(); - for (int i = 0; i < reports.length; i++) { - nrb.addStorageReport(reports[i].getProtoBufMessage()); - } - return nrb.build(); + return reports; } finally { this.readUnlock(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 49b907f10ec5..a80841f60035 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; @@ -129,12 +128,8 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath, chunksPath, dbFile, config); - String impl = config.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT); - //Set containerData for the KeyValueContainer. 
containerData.setChunksPath(chunksPath.getPath()); - containerData.setContainerDBType(impl); containerData.setDbFile(dbFile); containerData.setVolume(containerVolume); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index 95795e64c953..d6c4ff0c5575 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -47,8 +47,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_LEVELDB; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; /** * Class to run integrity checks on Datanode Containers. @@ -186,8 +186,8 @@ private void checkContainerFile() throws IOException { } dbType = onDiskContainerData.getContainerDBType(); - if (!dbType.equals(OZONE_METADATA_STORE_IMPL_ROCKSDB) && - !dbType.equals(OZONE_METADATA_STORE_IMPL_LEVELDB)) { + if (!dbType.equals(CONTAINER_DB_TYPE_ROCKSDB) && + !dbType.equals(CONTAINER_DB_TYPE_LEVELDB)) { String errStr = "Unknown DBType [" + dbType + "] in Container File for [" + containerID + "]"; throw new IOException(errStr); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 373b3223a68d..68f01fbc437b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -38,9 +38,10 @@ import java.io.File; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import static java.lang.Math.max; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE_ROCKSDB; import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY; import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_BYTES_USED_KEY; @@ -65,14 +66,14 @@ public class KeyValueContainerData extends ContainerData { private String metadataPath; //Type of DB used to store key to chunks mapping - private String containerDBType; + private String containerDBType = CONTAINER_DB_TYPE_ROCKSDB; private File dbFile = null; /** * Number of pending deletion blocks in KeyValueContainer. 
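The pending-deletion counter in KeyValueContainerData is widened from AtomicInteger to AtomicLong so a large container cannot overflow it, and the DB value is read and written with Longs to match. A tiny, purely illustrative sketch of the counter operations:

```java
import java.util.concurrent.atomic.AtomicLong;

public class PendingDeletionCounter {
  private final AtomicLong numPendingDeletionBlocks = new AtomicLong(0);

  void incrPendingDeletionBlocks(long numBlocks) {
    numPendingDeletionBlocks.addAndGet(numBlocks);
  }

  void decrPendingDeletionBlocks(long numBlocks) {
    numPendingDeletionBlocks.addAndGet(-numBlocks);
  }

  long getNumPendingDeletionBlocks() {
    return numPendingDeletionBlocks.get();
  }

  public static void main(String[] args) {
    PendingDeletionCounter counter = new PendingDeletionCounter();
    counter.incrPendingDeletionBlocks(3_000_000_000L);   // beyond Integer.MAX_VALUE
    counter.decrPendingDeletionBlocks(1);
    System.out.println(counter.getNumPendingDeletionBlocks());
  }
}
```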
*/ - private final AtomicInteger numPendingDeletionBlocks; + private final AtomicLong numPendingDeletionBlocks; private long deleteTransactionId; @@ -97,7 +98,7 @@ public KeyValueContainerData(long id, ChunkLayOutVersion layOutVersion, long size, String originPipelineId, String originNodeId) { super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion, size, originPipelineId, originNodeId); - this.numPendingDeletionBlocks = new AtomicInteger(0); + this.numPendingDeletionBlocks = new AtomicLong(0); this.deleteTransactionId = 0; } @@ -105,7 +106,7 @@ public KeyValueContainerData(ContainerData source) { super(source); Preconditions.checkArgument(source.getContainerType() == ContainerProtos.ContainerType.KeyValueContainer); - this.numPendingDeletionBlocks = new AtomicInteger(0); + this.numPendingDeletionBlocks = new AtomicLong(0); this.deleteTransactionId = 0; } @@ -187,7 +188,7 @@ public void setContainerDBType(String containerDBType) { * * @param numBlocks increment number */ - public void incrPendingDeletionBlocks(int numBlocks) { + public void incrPendingDeletionBlocks(long numBlocks) { this.numPendingDeletionBlocks.addAndGet(numBlocks); } @@ -196,14 +197,14 @@ public void incrPendingDeletionBlocks(int numBlocks) { * * @param numBlocks decrement number */ - public void decrPendingDeletionBlocks(int numBlocks) { + public void decrPendingDeletionBlocks(long numBlocks) { this.numPendingDeletionBlocks.addAndGet(-1 * numBlocks); } /** * Get the number of pending deletion blocks. */ - public int getNumPendingDeletionBlocks() { + public long getNumPendingDeletionBlocks() { return this.numPendingDeletionBlocks.get(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 53797b08b241..e0de6ff90f86 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -420,14 +420,16 @@ ContainerCommandResponseProto handlePutBlock( BlockData blockData = BlockData.getFromProtoBuf(data); Preconditions.checkNotNull(blockData); + boolean incrKeyCount = false; if (!request.getPutBlock().hasEof() || request.getPutBlock().getEof()) { chunkManager.finishWriteChunks(kvContainer, blockData); + incrKeyCount = true; } long bcsId = dispatcherContext == null ? 
0 : dispatcherContext.getLogIndex(); blockData.setBlockCommitSequenceId(bcsId); - blockManager.putBlock(kvContainer, blockData); + blockManager.putBlock(kvContainer, blockData, incrKeyCount); blockDataProto = blockData.getProtoBufMessage(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 2141bed143a1..2958e9e5d4f5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -23,7 +23,6 @@ import java.nio.file.Paths; import java.util.List; -import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -171,7 +170,7 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, containerDB.getStore().get(DB_PENDING_DELETE_BLOCK_COUNT_KEY); if (pendingDeleteBlockCount != null) { kvContainerData.incrPendingDeletionBlocks( - Ints.fromByteArray(pendingDeleteBlockCount)); + Longs.fromByteArray(pendingDeleteBlockCount)); } else { // Set pending deleted block count. MetadataKeyFilters.KeyPrefixFilter filter = @@ -231,10 +230,22 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, private static void initializeUsedBytesAndBlockCount( KeyValueContainerData kvContainerData) throws IOException { + MetadataKeyFilters.KeyPrefixFilter filter = + new MetadataKeyFilters.KeyPrefixFilter(); + + // Ignore all blocks except those with no prefix, or those with + // #deleting# prefix. + filter.addFilter(OzoneConsts.DELETED_KEY_PREFIX, true) + .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true) + .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true) + .addFilter(OzoneConsts.BLOCK_COUNT, true) + .addFilter(OzoneConsts.CONTAINER_BYTES_USED, true) + .addFilter(OzoneConsts.PENDING_DELETE_BLOCK_COUNT, true); + long blockCount = 0; try (KeyValueBlockIterator blockIter = new KeyValueBlockIterator( kvContainerData.getContainerID(), - new File(kvContainerData.getContainerPath()))) { + new File(kvContainerData.getContainerPath()), filter)) { long usedBytes = 0; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 9544e29c1ecf..51fa1c9614f6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -83,6 +83,20 @@ public BlockManagerImpl(ConfigurationSource conf) { * @throws IOException */ public long putBlock(Container container, BlockData data) throws IOException { + return putBlock(container, data, true); + } + /** + * Puts or overwrites a block. + * + * @param container - Container for which block need to be added. + * @param data - BlockData. + * @param incrKeyCount - for FilePerBlockStrategy, increase key count only + * when the whole block file is written. + * @return length of the block. 
+ * @throws IOException + */ + public long putBlock(Container container, BlockData data, + boolean incrKeyCount) throws IOException { Preconditions.checkNotNull(data, "BlockData cannot be null for put " + "operation."); Preconditions.checkState(data.getContainerID() >= 0, "Container Id " + @@ -129,14 +143,18 @@ public long putBlock(Container container, BlockData data) throws IOException { Longs.toByteArray(container.getContainerData().getBytesUsed())); // Set Block Count for a container. - batch.put(DB_BLOCK_COUNT_KEY, - Longs.toByteArray(container.getContainerData().getKeyCount() + 1)); + if (incrKeyCount) { + batch.put(DB_BLOCK_COUNT_KEY, + Longs.toByteArray(container.getContainerData().getKeyCount() + 1)); + } db.getStore().writeBatch(batch); container.updateBlockCommitSequenceId(bcsId); // Increment block count finally here for in-memory. - container.getContainerData().incrKeyCount(); + if (incrKeyCount) { + container.getContainerData().incrKeyCount(); + } if (LOG.isDebugEnabled()) { LOG.debug( "Block " + data.getBlockID() + " successfully committed with bcsId " diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java index 6812b0d8ff8e..72b104025b59 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java @@ -40,6 +40,18 @@ public interface BlockManager { */ long putBlock(Container container, BlockData data) throws IOException; + /** + * Puts or overwrites a block. + * + * @param container - Container for which block need to be added. + * @param data - Block Data. + * @param incrKeyCount - Whether to increase container key count. + * @return length of the Block. + * @throws IOException + */ + long putBlock(Container container, BlockData data, boolean incrKeyCount) + throws IOException; + /** * Gets an existing block. 
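The new putBlock(container, data, incrKeyCount) overload lets the handler decide when the container block count goes up: with the file-per-block strategy the flag is only set when the request signals the end of the block (no eof field, or eof is true), so partially written blocks are not counted. A toy sketch of how the flag changes behaviour; the types below are simplified stand-ins for the real BlockManager, Container, and BlockData:

```java
public class PutBlockFlagDemo {
  // Simplified stand-in for the BlockManager#putBlock overload added here.
  interface BlockManager {
    long putBlock(String container, String blockData, boolean incrKeyCount);
  }

  static final class CountingBlockManager implements BlockManager {
    long keyCount;

    @Override
    public long putBlock(String container, String blockData, boolean incrKeyCount) {
      if (incrKeyCount) {
        keyCount++;   // only completed blocks raise the container key count
      }
      return blockData.length();
    }
  }

  public static void main(String[] args) {
    CountingBlockManager mgr = new CountingBlockManager();
    mgr.putBlock("cont1", "partial-block", false);   // intermediate PutBlock, no EOF
    mgr.putBlock("cont1", "full-block", true);       // final PutBlock with EOF
    System.out.println(mgr.keyCount);                // 1
  }
}
```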
* diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 1b9b3d690724..fa63cf1b862e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -120,6 +120,7 @@ public boolean accept(File pathname) { return; } + LOG.info("Start to verify containers on volume {}", hddsVolumeRootDir); for (File scmLoc : scmDir) { File currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT); File[] containerTopDirs = currentDir.listFiles(); @@ -144,6 +145,7 @@ public boolean accept(File pathname) { } } } + LOG.info("Finish verifying containers on volume {}", hddsVolumeRootDir); } private void verifyContainerFile(long containerID, File containerFile) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index bbbec25af783..26da4873dcdb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -33,12 +33,14 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.security.token.BlockTokenVerifier; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; +import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -58,6 +60,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; +import org.apache.ratis.grpc.GrpcTlsConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -81,6 +84,7 @@ public class OzoneContainer { private ContainerMetadataScanner metadataScanner; private List dataScanners; private final BlockDeletingService blockDeletingService; + private final GrpcTlsConfig tlsClientConfig; /** * Construct OzoneContainer object. 
@@ -148,6 +152,12 @@ public OzoneContainer(DatanodeDetails datanodeDetails, ConfigurationSource blockDeletingService = new BlockDeletingService(this, svcInterval, serviceTimeout, TimeUnit.MILLISECONDS, config); + tlsClientConfig = RatisHelper.createTlsClientConfig( + secConf, certClient != null ? certClient.getCACertificate() : null); + } + + public GrpcTlsConfig getTlsClientConfig() { + return tlsClientConfig; } private GrpcReplicationService createReplicationService() { @@ -162,6 +172,7 @@ private void buildContainerSet() { Iterator volumeSetIterator = volumeSet.getVolumesList() .iterator(); ArrayList volumeThreads = new ArrayList(); + long startTime = System.currentTimeMillis(); //TODO: diskchecker should be run before this, to see how disks are. // And also handle disk failure tolerance need to be added @@ -182,6 +193,8 @@ private void buildContainerSet() { Thread.currentThread().interrupt(); } + LOG.info("Build ContainerSet costs {}s", + (System.currentTimeMillis() - startTime) / 1000); } /** @@ -294,8 +307,21 @@ public ContainerController getController() { * Returns node report of container storage usage. */ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() - throws IOException { - return volumeSet.getNodeReport(); + throws IOException { + StorageLocationReport[] reports = volumeSet.getStorageReport(); + StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb + = StorageContainerDatanodeProtocolProtos. + NodeReportProto.newBuilder(); + for (int i = 0; i < reports.length; i++) { + nrb.addStorageReport(reports[i].getProtoBufMessage()); + } + List metadataReport = + writeChannel.getStorageReport(); + if (metadataReport != null) { + nrb.addAllMetadataStorageReport(metadataReport); + } + return nrb.build(); } @VisibleForTesting diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java index 660ba4ee639b..abeaf03c1667 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java @@ -24,6 +24,7 @@ import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.cert.X509Certificate; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; @@ -37,10 +38,13 @@ .IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceStub; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.ratis.thirdparty.io.grpc.ManagedChannel; +import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts; import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; +import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,13 +62,26 @@ public class GrpcReplicationClient { private final Path workingDirectory; - public GrpcReplicationClient(String host, - int port, Path workingDir) { + public GrpcReplicationClient(String host, int port, Path workingDir, + SecurityConfig secConfig, X509Certificate caCert) throws IOException { + NettyChannelBuilder channelBuilder = 
+ NettyChannelBuilder.forAddress(host, port) + .usePlaintext() + .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE); - channel = NettyChannelBuilder.forAddress(host, port) - .usePlaintext() - .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - .build(); + if (secConfig.isGrpcTlsEnabled()) { + channelBuilder.useTransportSecurity(); + + SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient(); + if (caCert != null) { + sslContextBuilder.trustManager(caCert); + } + if (secConfig.useTestCert()) { + channelBuilder.overrideAuthority("localhost"); + } + channelBuilder.sslContext(sslContextBuilder.build()); + } + channel = channelBuilder.build(); client = IntraDatanodeProtocolServiceGrpc.newStub(channel); workingDirectory = workingDir; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java index d7666ea1127b..9d7b5516a5c3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java @@ -18,8 +18,10 @@ package org.apache.hadoop.ozone.container.replication; +import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import java.security.cert.X509Certificate; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.function.Function; @@ -27,6 +29,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.slf4j.Logger; @@ -45,9 +48,11 @@ public class SimpleContainerDownloader implements ContainerDownloader { LoggerFactory.getLogger(SimpleContainerDownloader.class); private final Path workingDirectory; + private final SecurityConfig securityConfig; + private final X509Certificate caCert; - public SimpleContainerDownloader(ConfigurationSource conf) { - + public SimpleContainerDownloader(ConfigurationSource conf, + X509Certificate caCert) { String workDirString = conf.get(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR); @@ -57,6 +62,8 @@ public SimpleContainerDownloader(ConfigurationSource conf) { } else { workingDirectory = Paths.get(workDirString); } + securityConfig = new SecurityConfig(conf); + this.caCert = caCert; } @Override @@ -66,22 +73,27 @@ public CompletableFuture getContainerDataFromReplicas(long containerId, CompletableFuture result = null; for (DatanodeDetails datanode : sourceDatanodes) { try { - if (result == null) { GrpcReplicationClient grpcReplicationClient = new GrpcReplicationClient(datanode.getIpAddress(), datanode.getPort(Name.STANDALONE).getValue(), - workingDirectory); + workingDirectory, securityConfig, caCert); result = grpcReplicationClient.download(containerId); } else { result = result.thenApply(CompletableFuture::completedFuture) .exceptionally(t -> { LOG.error("Error on replicating container: " + containerId, t); - GrpcReplicationClient grpcReplicationClient = - new GrpcReplicationClient(datanode.getIpAddress(), - datanode.getPort(Name.STANDALONE).getValue(), - workingDirectory); - return grpcReplicationClient.download(containerId); + try { + 
GrpcReplicationClient grpcReplicationClient = + new GrpcReplicationClient(datanode.getIpAddress(), + datanode.getPort(Name.STANDALONE).getValue(), + workingDirectory, securityConfig, caCert); + return grpcReplicationClient.download(containerId); + } catch (IOException e) { + LOG.error("Error on replicating container: " + containerId, + t); + return null; + } }).thenCompose(Function.identity()); } } catch (Exception ex) { diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html new file mode 100644 index 000000000000..d4f7a17c0b76 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html @@ -0,0 +1,21 @@ + + + + + +
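SimpleContainerDownloader above chains the source datanodes with thenApply / exceptionally / thenCompose, so a failed download automatically falls back to the next replica while a successful one short-circuits the rest. A self-contained sketch of that chaining pattern; the download method here is a fake that fails for the first source:

```java
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

public class ReplicaFallback {
  // Fake downloader: the first datanode is unreachable, the others succeed.
  static CompletableFuture<String> download(String source, long containerId) {
    CompletableFuture<String> future = new CompletableFuture<>();
    if (source.equals("dn1")) {
      future.completeExceptionally(new RuntimeException(source + " unreachable"));
    } else {
      future.complete("container-" + containerId + "-from-" + source);
    }
    return future;
  }

  public static void main(String[] args) {
    long containerId = 42;
    List<String> sources = Arrays.asList("dn1", "dn2", "dn3");

    CompletableFuture<String> result = null;
    for (String source : sources) {
      if (result == null) {
        result = download(source, containerId);
      } else {
        // On failure try the next replica; on success keep the earlier value.
        result = result.thenApply(CompletableFuture::completedFuture)
            .exceptionally(t -> download(source, containerId))
            .thenCompose(Function.identity());
      }
    }
    System.out.println(result.join());   // container-42-from-dn2
  }
}
```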
diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js new file mode 100644 index 000000000000..c43eb42bdc25 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +(function () { + "use strict"; + angular.module('dn', ['ozone', 'nvd3']); + + angular.module('dn').component('dnOverview', { + templateUrl: 'dn-overview.html', + require: { + overview: "^overview" + }, + controller: function ($http) { + var ctrl = this; + $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=StorageContainerMetrics") + .then(function (result) { + ctrl.dnmetrics = result.data.beans[0]; + }); + } + }); +})(); diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html new file mode 100644 index 000000000000..b1f703c0d659 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html @@ -0,0 +1,76 @@ + + + + + + + + + + + HDDS Datanode Service + + + + + + + + + + + +

+ +
+ + + +
+ + + + + + + + + + + + diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/main.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/main.html new file mode 100644 index 000000000000..c639b0bb2b50 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/main.html @@ -0,0 +1,20 @@ + + + + + diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 45fe38b937ae..b8ebaecf282d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -583,6 +583,11 @@ public static String getFixedLengthString(String string, int length) { private static RaftServerImpl getRaftServerImpl(HddsDatanodeService dn, Pipeline pipeline) throws Exception { + if (!pipeline.getNodes().contains(dn.getDatanodeDetails())) { + throw new IllegalArgumentException("Pipeline:" + pipeline.getId() + + " not exist in datanode:" + dn.getDatanodeDetails().getUuid()); + } + XceiverServerSpi server = dn.getDatanodeStateMachine(). getContainer().getWriteChannel(); RaftServerProxy proxy = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index b8843dea87de..ba6999d05016 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -27,7 +27,6 @@ import java.util.concurrent.TimeoutException; import com.google.common.collect.Lists; -import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; @@ -180,7 +179,7 @@ private void createToDeleteBlocks(ContainerSet containerSet, metadata.getStore().put(OzoneConsts.DB_CONTAINER_BYTES_USED_KEY, Longs.toByteArray(blockLength * numOfBlocksPerContainer)); metadata.getStore().put(DB_PENDING_DELETE_BLOCK_COUNT_KEY, - Ints.toByteArray(numOfBlocksPerContainer)); + Longs.toByteArray(numOfBlocksPerContainer)); } } } @@ -251,6 +250,8 @@ public void testBlockDeletion() throws Exception { // Ensure there are 3 blocks under deletion and 0 deleted blocks Assert.assertEquals(3, getUnderDeletionBlocksCount(meta)); + Assert.assertEquals(3, Longs.fromByteArray( + meta.getStore().get(DB_PENDING_DELETE_BLOCK_COUNT_KEY))); Assert.assertEquals(0, getDeletedBlocksCount(meta)); // An interval will delete 1 * 2 blocks @@ -269,7 +270,7 @@ public void testBlockDeletion() throws Exception { // Check finally DB counters. // Not checking bytes used, as handler is a mock call. 
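GrpcReplicationClient (a few hunks above) now switches its Netty channel to TLS when gRPC TLS is enabled, trusting the cluster CA certificate and optionally overriding the authority for test certificates. A trimmed sketch of that channel construction, assuming the ratis-thirdparty shaded gRPC and Netty classes that the patch itself uses; the message-size constant is a stand-in:

```java
import java.security.cert.X509Certificate;

import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;

public class ReplicationChannelFactory {
  // Build a channel to a datanode replication endpoint; TLS only when enabled.
  static ManagedChannel build(String host, int port, boolean tlsEnabled,
      X509Certificate caCert) throws Exception {
    NettyChannelBuilder builder = NettyChannelBuilder.forAddress(host, port)
        .usePlaintext()
        .maxInboundMessageSize(32 * 1024 * 1024);   // stand-in for the chunk limit
    if (tlsEnabled) {
      builder.useTransportSecurity();               // replaces the plaintext setting
      SslContextBuilder ssl = GrpcSslContexts.forClient();
      if (caCert != null) {
        ssl.trustManager(caCert);                   // trust the cluster CA
      }
      builder.sslContext(ssl.build());
    }
    return builder.build();
  }

  public static void main(String[] args) throws Exception {
    ManagedChannel channel = build("localhost", 9859, false, null);
    channel.shutdownNow();
  }
}
```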
- Assert.assertEquals(0, Ints.fromByteArray( + Assert.assertEquals(0, Longs.fromByteArray( meta.getStore().get(DB_PENDING_DELETE_BLOCK_COUNT_KEY))); Assert.assertEquals(0, Longs.fromByteArray( meta.getStore().get(DB_BLOCK_COUNT_KEY))); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java index 947a087cb82e..2e389903769f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java @@ -31,6 +31,13 @@ import org.junit.rules.ExpectedException; import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; /** @@ -63,6 +70,8 @@ public void testContainerCacheEviction() throws Exception { conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); ContainerCache cache = ContainerCache.getInstance(conf); + cache.clear(); + Assert.assertEquals(0, cache.size()); File containerDir1 = new File(root, "cont1"); File containerDir2 = new File(root, "cont2"); File containerDir3 = new File(root, "cont3"); @@ -123,4 +132,47 @@ public void testContainerCacheEviction() throws Exception { thrown.expect(IllegalArgumentException.class); db5.close(); } + + @Test + public void testConcurrentDBGet() throws Exception { + File root = new File(testRoot); + root.mkdirs(); + root.deleteOnExit(); + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); + ContainerCache cache = ContainerCache.getInstance(conf); + cache.clear(); + Assert.assertEquals(0, cache.size()); + File containerDir = new File(root, "cont1"); + createContainerDB(conf, containerDir); + ExecutorService executorService = Executors.newFixedThreadPool(2); + Runnable task = () -> { + try { + ReferenceCountedDB db1 = cache.getDB(1, "RocksDB", + containerDir.getPath(), conf); + Assert.assertNotNull(db1); + } catch (IOException e) { + Assert.fail("Should get the DB instance"); + } + }; + List futureList = new ArrayList<>(); + futureList.add(executorService.submit(task)); + futureList.add(executorService.submit(task)); + for (Future future: futureList) { + try { + future.get(); + } catch (InterruptedException| ExecutionException e) { + Assert.fail("Should get the DB instance"); + } + } + + ReferenceCountedDB db = cache.getDB(1, "RocksDB", + containerDir.getPath(), conf); + db.close(); + db.close(); + db.close(); + Assert.assertEquals(1, cache.size()); + db.cleanup(); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java index 545d6702c479..d3032c3211f5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java @@ -28,10 +28,13 @@ import java.net.InetSocketAddress; import java.util.List; +import java.util.concurrent.CompletableFuture; +import 
java.util.concurrent.Executors; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -39,6 +42,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates; import org.apache.hadoop.ozone.container.common.states.DatanodeState; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.Assert; import org.junit.Test; import com.google.protobuf.GeneratedMessage; @@ -182,4 +187,84 @@ public DatanodeStates await(long time, TimeUnit timeUnit) assertEquals(DatanodeStates.SHUTDOWN, subject.getState()); } + @Test + public void testIsThreadPoolAvailable() throws Exception { + StateContext stateContext = new StateContext(null, null, null); + + int threadPoolSize = 2; + ExecutorService executorService = Executors.newFixedThreadPool( + threadPoolSize); + + CompletableFuture futureOne = new CompletableFuture<>(); + CompletableFuture futureTwo = new CompletableFuture<>(); + + // task num greater than pool size + for (int i = 0; i < threadPoolSize; i++) { + executorService.submit(() -> futureOne.get()); + } + executorService.submit(() -> futureTwo.get()); + + Assert.assertFalse(stateContext.isThreadPoolAvailable(executorService)); + + futureOne.complete("futureOne"); + LambdaTestUtils.await(1000, 100, () -> + stateContext.isThreadPoolAvailable(executorService)); + + futureTwo.complete("futureTwo"); + executorService.shutdown(); + } + + @Test + public void doesNotAwaitWithoutExecute() throws Exception { + final AtomicInteger executed = new AtomicInteger(); + final AtomicInteger awaited = new AtomicInteger(); + + ExecutorService executorService = Executors.newFixedThreadPool(1); + CompletableFuture future = new CompletableFuture<>(); + executorService.submit(() -> future.get()); + executorService.submit(() -> future.get()); + + StateContext subject = new StateContext(new OzoneConfiguration(), + DatanodeStates.INIT, mock(DatanodeStateMachine.class)) { + @Override + public DatanodeState getTask() { + // this task counts the number of execute() and await() calls + return new DatanodeState() { + @Override + public void onEnter() { + // no-op + } + + @Override + public void onExit() { + // no-op + } + + @Override + public void execute(ExecutorService executor) { + executed.incrementAndGet(); + } + + @Override + public DatanodeStates await(long time, TimeUnit timeUnit) { + awaited.incrementAndGet(); + return DatanodeStates.INIT; + } + }; + } + }; + + subject.execute(executorService, 2, TimeUnit.SECONDS); + + assertEquals(0, awaited.get()); + assertEquals(0, executed.get()); + + future.complete("any"); + LambdaTestUtils.await(1000, 100, () -> + subject.isThreadPoolAvailable(executorService)); + + subject.execute(executorService, 2, TimeUnit.SECONDS); + assertEquals(1, awaited.get()); + assertEquals(1, executed.get()); + } } \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/datanode/TestRunningDatanodeState.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/datanode/TestRunningDatanodeState.java new file mode 100644 index 000000000000..9fb4307793b0 
--- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/datanode/TestRunningDatanodeState.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.states.datanode; + +import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.util.Time; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine.EndPointStates.SHUTDOWN; +import static org.mockito.Mockito.when; + +/** + * Test class for RunningDatanodeState. + */ +public class TestRunningDatanodeState { + @Test + public void testAwait() throws InterruptedException { + SCMConnectionManager connectionManager = + Mockito.mock(SCMConnectionManager.class); + List stateMachines = new ArrayList<>(); + when(connectionManager.getValues()).thenReturn(stateMachines); + + RunningDatanodeState state = + new RunningDatanodeState(null, connectionManager, null); + + int threadPoolSize = 2; + ExecutorService executorService = Executors.newFixedThreadPool( + threadPoolSize); + + ExecutorCompletionService ecs = + new ExecutorCompletionService<>(executorService); + state.setExecutorCompletionService(ecs); + + for (int i = 0; i < threadPoolSize; i++) { + stateMachines.add(new EndpointStateMachine(null, null, null)); + } + + CompletableFuture futureOne = + new CompletableFuture<>(); + for (int i = 0; i < threadPoolSize; i++) { + ecs.submit(() -> futureOne.get()); + } + + long startTime = Time.monotonicNow(); + state.await(500, TimeUnit.MILLISECONDS); + long endTime = Time.monotonicNow(); + Assert.assertTrue((endTime - startTime) >= 500); + + futureOne.complete(SHUTDOWN); + + CompletableFuture futureTwo = + new CompletableFuture<>(); + for (int i = 0; i < threadPoolSize; i++) { + ecs.submit(() -> futureTwo.get()); + } + futureTwo.complete(SHUTDOWN); + + startTime = Time.monotonicNow(); + state.await(500, TimeUnit.MILLISECONDS); + endTime = Time.monotonicNow(); + Assert.assertTrue((endTime - startTime) < 500); + + executorService.shutdown(); + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 62097b38e8b3..aff0528bdef4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -45,9 +45,6 @@ import com.google.common.primitives.Longs; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; import org.junit.After; @@ -71,22 +68,18 @@ public class TestKeyValueBlockIterator { private OzoneConfiguration conf; private File testRoot; - private final String storeImpl; private final ChunkLayOutVersion layout; - public TestKeyValueBlockIterator(String metadataImpl, - ChunkLayOutVersion layout) { - this.storeImpl = metadataImpl; + public TestKeyValueBlockIterator(ChunkLayOutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Collection data() { return Arrays.asList(new Object[][] { - {OZONE_METADATA_STORE_IMPL_LEVELDB, FILE_PER_CHUNK}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB, FILE_PER_CHUNK}, - {OZONE_METADATA_STORE_IMPL_LEVELDB, FILE_PER_BLOCK}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB, FILE_PER_BLOCK}}); + {FILE_PER_CHUNK}, + {FILE_PER_BLOCK} + }); } @Before @@ -94,7 +87,6 @@ public void setUp() throws Exception { testRoot = GenericTestUtils.getRandomizedTestDir(); conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - conf.set(OZONE_METADATA_STORE_IMPL, storeImpl); volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index cb8ef3406c63..4583a54f5c54 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -58,10 +58,6 @@ import java.util.UUID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -76,7 +72,6 @@ private static final Logger LOG = LoggerFactory.getLogger(TestKeyValueContainerCheck.class); - private final String storeImpl; private final ChunkLayoutTestInfo chunkManagerTestInfo; private KeyValueContainer container; private KeyValueContainerData containerData; @@ -85,28 +80,22 @@ private File testRoot; private ChunkManager chunkManager; - public TestKeyValueContainerCheck(String metadataImpl, - ChunkLayoutTestInfo chunkManagerTestInfo) { - this.storeImpl = metadataImpl; + public TestKeyValueContainerCheck(ChunkLayoutTestInfo chunkManagerTestInfo) { this.chunkManagerTestInfo = chunkManagerTestInfo; } @Parameterized.Parameters public static Collection data() { return Arrays.asList(new Object[][] { - {OZONE_METADATA_STORE_IMPL_LEVELDB, ChunkLayoutTestInfo.FILE_PER_CHUNK}, - {OZONE_METADATA_STORE_IMPL_LEVELDB, ChunkLayoutTestInfo.FILE_PER_BLOCK}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB, ChunkLayoutTestInfo.FILE_PER_CHUNK}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB, ChunkLayoutTestInfo.FILE_PER_BLOCK} + {ChunkLayoutTestInfo.FILE_PER_CHUNK}, + {ChunkLayoutTestInfo.FILE_PER_BLOCK} 
}); } @Before public void setUp() throws Exception { - LOG.info("Testing store:{} layout:{}", - storeImpl, chunkManagerTestInfo.getLayout()); + LOG.info("Testing layout:{}", chunkManagerTestInfo.getLayout()); this.testRoot = GenericTestUtils.getRandomizedTestDir(); conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - conf.set(OZONE_METADATA_STORE_IMPL, storeImpl); chunkManagerTestInfo.updateConfig(conf); volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf); chunkManager = chunkManagerTestInfo.createChunkManager(true, null); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index e1c5f33ff4af..0014a754899b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -22,15 +22,16 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; @@ -51,7 +52,6 @@ import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_BYTES_USED_KEY; import static org.apache.hadoop.ozone.OzoneConsts.DB_PENDING_DELETE_BLOCK_COUNT_KEY; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; @@ -68,7 +68,7 @@ public class TestContainerReader { private MutableVolumeSet volumeSet; private HddsVolume hddsVolume; private ContainerSet containerSet; - private ConfigurationSource conf; + private OzoneConfiguration conf; private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; @@ -138,17 +138,10 @@ private void markBlocksForDelete(KeyValueContainer keyValueContainer, } if (setMetaData) { + // Pending delete blocks are still counted towards the block count + // and bytes used metadata values, so those do not change. 
metadataStore.getStore().put(DB_PENDING_DELETE_BLOCK_COUNT_KEY, Longs.toByteArray(count)); - long blkCount = Longs.fromByteArray( - metadataStore.getStore().get(DB_BLOCK_COUNT_KEY)); - metadataStore.getStore().put(DB_BLOCK_COUNT_KEY, - Longs.toByteArray(blkCount - count)); - long bytesUsed = Longs.fromByteArray( - metadataStore.getStore().get(DB_CONTAINER_BYTES_USED_KEY)); - metadataStore.getStore().put(DB_CONTAINER_BYTES_USED_KEY, - Longs.toByteArray(bytesUsed - (count * blockLen))); - } } @@ -209,14 +202,78 @@ public void testContainerReader() throws Exception { keyValueContainer.getContainerData(); // Verify block related metadata. - Assert.assertEquals(blockCount - i, + Assert.assertEquals(blockCount, keyValueContainerData.getKeyCount()); - Assert.assertEquals((blockCount - i) * blockLen, + Assert.assertEquals(blockCount * blockLen, keyValueContainerData.getBytesUsed()); Assert.assertEquals(i, keyValueContainerData.getNumPendingDeletionBlocks()); } } + + @Test + public void testMultipleContainerReader() throws Exception { + final int volumeNum = 10; + StringBuffer datanodeDirs = new StringBuffer(); + File[] volumeDirs = new File[volumeNum]; + for (int i = 0; i < volumeNum; i++) { + volumeDirs[i] = tempDir.newFolder(); + datanodeDirs = datanodeDirs.append(volumeDirs[i]).append(","); + } + conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, + datanodeDirs.toString()); + MutableVolumeSet volumeSets = + new MutableVolumeSet(datanodeId.toString(), conf); + ContainerCache cache = ContainerCache.getInstance(conf); + cache.clear(); + + RoundRobinVolumeChoosingPolicy policy = + new RoundRobinVolumeChoosingPolicy(); + + final int containerCount = 100; + blockCount = containerCount; + for (int i = 0; i < containerCount; i++) { + KeyValueContainerData keyValueContainerData = + new KeyValueContainerData(i, ChunkLayOutVersion.FILE_PER_BLOCK, + (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), + datanodeId.toString()); + + KeyValueContainer keyValueContainer = + new KeyValueContainer(keyValueContainerData, + conf); + keyValueContainer.create(volumeSets, policy, scmId); + + List blkNames; + if (i % 2 == 0) { + blkNames = addBlocks(keyValueContainer, true); + markBlocksForDelete(keyValueContainer, true, blkNames, i); + } else { + blkNames = addBlocks(keyValueContainer, false); + markBlocksForDelete(keyValueContainer, false, blkNames, i); + } + } + + List hddsVolumes = volumeSets.getVolumesList(); + ContainerReader[] containerReaders = new ContainerReader[volumeNum]; + Thread[] threads = new Thread[volumeNum]; + for (int i = 0; i < volumeNum; i++) { + containerReaders[i] = new ContainerReader(volumeSets, + hddsVolumes.get(i), containerSet, conf); + threads[i] = new Thread(containerReaders[i]); + } + long startTime = System.currentTimeMillis(); + for (int i = 0; i < volumeNum; i++) { + threads[i].start(); + } + for (int i = 0; i < volumeNum; i++) { + threads[i].join(); + } + System.out.println("Open " + volumeNum + " Volume with " + containerCount + + " costs " + (System.currentTimeMillis() - startTime) / 1000 + "s"); + Assert.assertEquals(containerCount, + containerSet.getContainerMap().entrySet().size()); + Assert.assertEquals(containerCount, cache.size()); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1056a0d727b4..2bb52f65dc8f 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; @@ -45,10 +46,7 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; +import org.junit.*; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -169,6 +167,51 @@ public void testBuildContainerMap() throws Exception { verifyCommittedSpace(ozoneContainer); } + @Test + public void testBuildNodeReport() throws Exception { + String path = folder.getRoot() + .getAbsolutePath(); + conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + String.join(",", + path + "/ratis1", path + "/ratis2", path + "ratis3")); + DatanodeStateMachine stateMachine = Mockito.mock( + DatanodeStateMachine.class); + StateContext context = Mockito.mock(StateContext.class); + Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); + Mockito.when(context.getParent()).thenReturn(stateMachine); + // When OzoneContainer is started, the containers from disk should be + // loaded into the containerSet. + // Also expected to initialize committed space for each volume. + OzoneContainer ozoneContainer = new + OzoneContainer(datanodeDetails, conf, context, null); + Assert.assertEquals(volumeSet.getVolumesList().size(), + ozoneContainer.getNodeReport().getStorageReportList().size()); + Assert.assertEquals(3, + ozoneContainer.getNodeReport().getMetadataStorageReportList() + .size()); + + } + + @Test + public void testBuildNodeReportWithDefaultRatisLogDir() throws Exception { + DatanodeStateMachine stateMachine = Mockito.mock( + DatanodeStateMachine.class); + StateContext context = Mockito.mock(StateContext.class); + Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); + Mockito.when(context.getParent()).thenReturn(stateMachine); + // When OzoneContainer is started, the containers from disk should be + // loaded into the containerSet. + // Also expected to initialize committed space for each volume. 
+ OzoneContainer ozoneContainer = new + OzoneContainer(datanodeDetails, conf, context, null); + Assert.assertEquals(volumeSet.getVolumesList().size(), + ozoneContainer.getNodeReport().getStorageReportList().size()); + Assert.assertEquals(1, + ozoneContainer.getNodeReport().getMetadataStorageReportList() + .size()); + } + + @Test public void testContainerCreateDiskFull() throws Exception { long containerSize = (long) StorageUnit.MB.toBytes(100); diff --git a/hadoop-hdds/docs/content/_index.md b/hadoop-hdds/docs/content/_index.md index 52e190cf99a4..9bc7a7ae695a 100644 --- a/hadoop-hdds/docs/content/_index.md +++ b/hadoop-hdds/docs/content/_index.md @@ -1,4 +1,5 @@ --- +name: Ozone title: Overview menu: main weight: -10 @@ -29,7 +30,7 @@ Apart from scaling to billions of objects of varying sizes, Ozone can function effectively in containerized environments like Kubernetes._*

-Applications like Apache Spark, Hive and YARN, work without any modifications when using Ozone. Ozone comes with a [Java client library]({{< ref "JavaApi.md" >}}), [S3 protocol support]({{< ref "S3.md" >}}), and a [command line interface]({{< ref "shell/_index.md" >}}) which makes it easy to use Ozone. +Applications like Apache Spark, Hive and YARN, work without any modifications when using Ozone. Ozone comes with a [Java client library]({{< ref "JavaApi.md" >}}), [S3 protocol support]({{< ref "S3.md" >}}), and a [command line interface]({{< ref "Cli.md" >}}) which makes it easy to use Ozone. Ozone consists of volumes, buckets, and keys: diff --git a/hadoop-hdds/docs/content/_index.zh.md b/hadoop-hdds/docs/content/_index.zh.md index 8bdcf5044454..689490be11ad 100644 --- a/hadoop-hdds/docs/content/_index.zh.md +++ b/hadoop-hdds/docs/content/_index.zh.md @@ -28,7 +28,7 @@ weight: -10 Ozone 不仅能存储数十亿个不同大小的对象,还支持在容器化环境(比如 Kubernetes)中运行。_*

Apache Spark、Hive 和 YARN 等应用无需任何修改即可使用 Ozone。Ozone 提供了 [Java API]({{< -ref "JavaApi.zh.md" >}})、[S3 接口]({{< ref "S3.zh.md" >}})和[命令行接口]({{< ref "shell/_index.zh.md" >}}),极大地方便了 Ozone +ref "JavaApi.zh.md" >}})、[S3 接口]({{< ref "S3.zh.md" >}})和命令行接口,极大地方便了 Ozone 在不同应用场景下的的使用。 Ozone 的管理由卷、桶和键组成: diff --git a/hadoop-hdds/docs/content/beyond/Containers.md b/hadoop-hdds/docs/content/beyond/Containers.md deleted file mode 100644 index 13a66d801f5d..000000000000 --- a/hadoop-hdds/docs/content/beyond/Containers.md +++ /dev/null @@ -1,234 +0,0 @@ ---- -title: "Ozone Containers" -summary: Ozone uses containers extensively for testing. This page documents the usage and best practices of Ozone. -weight: 2 ---- - - -Docker heavily is used at the ozone development with three principal use-cases: - -* __dev__: - * We use docker to start local pseudo-clusters (docker provides unified environment, but no image creation is required) -* __test__: - * We create docker images from the dev branches to test ozone in kubernetes and other container orchestrator system - * We provide _apache/ozone_ images for each release to make it easier for evaluation of Ozone. - These images are __not__ created __for production__ usage. - -

- -* __production__: - * We have documentation on how you can create your own docker image for your production cluster. - -Let's check out each of the use-cases in more detail: - -## Development - -Ozone artifact contains example docker-compose directories to make it easier to start Ozone cluster in your local machine. - -From distribution: - -```bash -cd compose/ozone -docker-compose up -d -``` - -After a local build: - -```bash -cd hadoop-ozone/dist/target/ozone-*/compose -docker-compose up -d -``` - -These environments are very important tools to start different type of Ozone clusters at any time. - -To be sure that the compose files are up-to-date, we also provide acceptance test suites which start -the cluster and check the basic behaviour. - -The acceptance tests are part of the distribution, and you can find the test definitions in `smoketest` directory. - -You can start the tests from any compose directory: - -For example: - -```bash -cd compose/ozone -./test.sh -``` - -### Implementation details - -`compose` tests are based on the apache/hadoop-runner docker image. The image itself does not contain -any Ozone jar file or binary just the helper scripts to start ozone. - -hadoop-runner provdes a fixed environment to run Ozone everywhere, but the ozone distribution itself -is mounted from the including directory: - -(Example docker-compose fragment) - -``` - scm: - image: apache/hadoop-runner:jdk11 - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - -``` - -The containers are configured based on environment variables, but because the same environment -variables should be set for each containers we maintain the list of the environment variables -in a separated file: - -``` - scm: - image: apache/hadoop-runner:jdk11 - #... - env_file: - - ./docker-config -``` - -The docker-config file contains the list of the required environment variables: - -``` -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -#... -``` - -As you can see we use naming convention. Based on the name of the environment variable, the -appropriate hadoop config XML (`ozone-site.xml` in our case) will be generated by a -[script](https://github.com/apache/hadoop/tree/docker-hadoop-runner-latest/scripts) which is -included in the `hadoop-runner` base image. - -The [entrypoint](https://github.com/apache/hadoop/blob/docker-hadoop-runner-latest/scripts/starter.sh) -of the `hadoop-runner` image contains a helper shell script which triggers this transformation and -can do additional actions (eg. initialize scm/om storage, download required keytabs, etc.) -based on environment variables. - -## Test/Staging - -The `docker-compose` based approach is recommended only for local test, not for multi node cluster. -To use containers on a multi-node cluster we need a Container Orchestrator like Kubernetes. - -Kubernetes example files are included in the `kubernetes` folder. - -*Please note*: all the provided images are based the `hadoop-runner` image which contains all the -required tool for testing in staging environments. For production we recommend to create your own, -hardened image with your own base image. - -### Test the release - -The release can be tested with deploying any of the example clusters: - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -Plese note that in this case the latest released container will be downloaded from the dockerhub. 
- -### Test the development build - -To test a development build you can create your own image and upload it to your own docker registry: - - -```bash -mvn clean install -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone -``` - -The configured image will be used in all the generated kubernetes resources files (`image:` keys are adjusted during the build) - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -## Production - - - -You can use the source of our development images as an example: - - * [Base image](https://github.com/apache/hadoop/blob/docker-hadoop-runner-jdk11/Dockerfile) - * [Docker image](https://github.com/apache/hadoop/blob/trunk/hadoop-ozone/dist/src/main/docker/Dockerfile) - - Most of the elements are optional and just helper function but to use the provided example - kubernetes resources you may need the scripts from - [here](https://github.com/apache/hadoop/tree/docker-hadoop-runner-jdk11/scripts) - - * The two python scripts convert environment variables to real hadoop XML config files - * The start.sh executes the python scripts (and other initialization) based on environment variables. - -## Containers - -Ozone related container images and source locations: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| # | Container | Repository | Base | Branch | Tags | Comments |
|---|-----------|------------|------|--------|------|----------|
| 1 | apache/ozone | https://github.com/apache/hadoop-docker-ozone | ozone-... | hadoop-runner | 0.3.0,0.4.0,0.4.1 | For each Ozone release we create new release tag. |
| 2 | apache/hadoop-runner | https://github.com/apache/hadoop | docker-hadoop-runner | centos | jdk11,jdk8,latest | This is the base image used for testing Hadoop Ozone. This is a set of utilities that make it easy for us run ozone. |
diff --git a/hadoop-hdds/docs/content/beyond/Containers.zh.md b/hadoop-hdds/docs/content/beyond/Containers.zh.md deleted file mode 100644 index c06902e04a36..000000000000 --- a/hadoop-hdds/docs/content/beyond/Containers.zh.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: "Ozone 中的容器技术" -summary: Ozone 广泛地使用容器来进行测试,本页介绍 Ozone 中容器的使用及其最佳实践。 -weight: 2 ---- - - -Ozone 的开发中大量地使用了 Docker,包括以下三种主要的应用场景: - -* __开发__: - * 我们使用 docker 来启动本地伪集群(docker 可以提供统一的环境,但是不需要创建镜像)。 -* __测试__: - * 我们从开发分支创建 docker 镜像,然后在 kubernetes 或其它容器编排系统上测试 ozone。 - * 我们为每个发行版提供了 _apache/ozone_ 镜像,以方便用户体验 Ozone。 - 这些镜像 __不__ 应当在 __生产__ 中使用。 - - - -* __生产__: - * 我们提供了如何为生产集群创建 docker 镜像的文档。 - -下面我们来详细地介绍一下各种应用场景: - -## 开发 - -Ozone 安装包中包含了 docker-compose 的示例目录,用于方便地在本地机器启动 Ozone 集群。 - -使用官方提供的发行包: - -```bash -cd compose/ozone -docker-compose up -d -``` - -本地构建方式: - -```bash -cd hadoop-ozone/dist/target/ozone-*/compose -docker-compose up -d -``` - -这些 compose 环境文件是重要的工具,可以用来随时启动各种类型的 Ozone 集群。 - -为了确保 compose 文件是最新的,我们提供了验收测试套件,套件会启动集群并检查其基本行为是否正常。 - -验收测试也包含在发行包中,你可以在 `smoketest` 目录下找到各个测试的定义。 - -你可以在任意 compose 目录进行测试,比如: - -```bash -cd compose/ozone -./test.sh -``` - -### 实现细节 - -`compose` 测试都基于 apache/hadoop-runner 镜像,这个镜像本身并不包含任何 Ozone 的 jar 包或二进制文件,它只是提供其了启动 Ozone 的辅助脚本。 - -hadoop-runner 提供了一个随处运行 Ozone 的固定环境,Ozone 分发包通过目录挂载包含在其中。 - -(docker-compose 示例片段) - -``` - scm: - image: apache/hadoop-runner:jdk11 - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - -``` - -容器应该通过环境变量来进行配置,由于每个容器都应当设置相同的环境变量,我们在单独的文件中维护了一个环境变量列表: - -``` - scm: - image: apache/hadoop-runner:jdk11 - #... - env_file: - - ./docker-config -``` - -docker-config 文件中包含了所需环境变量的列表: - -``` -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -#... -``` - -你可以看到我们所使用的命名规范,根据这些环境变量的名字,`hadoop-runner` 基础镜像中的[脚本](https://github.com/apache/hadoop/tree/docker-hadoop-runner-latest/scripts) 会生成合适的 hadoop XML 配置文件(在我们这种情况下就是 `ozone-site.xml`)。 - -`hadoop-runner` 镜像的[入口点](https://github.com/apache/hadoop/blob/docker-hadoop-runner-latest/scripts/starter -.sh)包含了一个辅助脚本,这个辅助脚本可以根据环境变量触发上述的配置文件生成以及其它动作(比如初始化 SCM 和 OM 的存储、下载必要的 keytab 等)。 - -## 测试 - -`docker-compose` 的方式应当只用于本地测试,不适用于多节点集群。要在多节点集群上使用容器,我们需要像 Kubernetes 这样的容器编排系统。 - -Kubernetes 示例文件在 `kubernetes` 文件夹中。 - -*请注意*:所有提供的镜像都使用 `hadoop-runner` 作为基础镜像,这个镜像中包含了所有测试环境所需的测试工具。对于生产环境,我们推荐用户使用自己的基础镜像创建可靠的镜像。 - -### 发行包测试 - -可以通过部署任意的示例集群来测试发行包: - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -注意,在这个例子中会从 Docker Hub 下载最新的镜像。 - -### 开发构建测试 - -为了测试开发中的构建,你需要创建自己的镜像并上传到自己的 docker 仓库中: - - -```bash -mvn clean install -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone -``` - -所有生成的 kubernetes 资源文件都会使用这个镜像 (`image:` keys are adjusted during the build) - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -## 生产 - - - -你可以使用我们开发中所用的镜像作为示例: - - * [基础镜像] (https://github.com/apache/hadoop/blob/docker-hadoop-runner-jdk11/Dockerfile) - * [完整镜像] (https://github.com/apache/hadoop/blob/trunk/hadoop-ozone/dist/src/main/docker/Dockerfile) - - Dockerfile 中大部分内容都是可选的辅助功能,但如果要使用我们提供的 kubernetes 示例资源文件,你可能需要[这里](https://github.com/apache/hadoop/tree/docker-hadoop-runner-jdk11/scripts)的脚本。 - - * 两个 python 脚本将环境变量转化为实际的 hadoop XML 配置文件 - * start.sh 根据环境变量执行 python 脚本(以及其它初始化工作) - -## 容器 - -Ozone 相关的容器镜像和 Dockerfile 位置: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#容器仓库基础镜像分支标签说明
1apache/ozonehttps://github.com/apache/hadoop-docker-ozoneozone-... hadoop-runner0.3.0,0.4.0,0.4.1每个 Ozone 发行版都对应一个新标签。
2apache/hadoop-runner https://github.com/apache/hadoopdocker-hadoop-runnercentosjdk11,jdk8,latest这是用于测试 Hadoop Ozone 的基础镜像,包含了一系列可以让我们更加方便地运行 Ozone 的工具。 -
diff --git a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md b/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md deleted file mode 100644 index f4f5492cf177..000000000000 --- a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Docker Cheat Sheet" -date: 2017-08-10 -summary: Docker Compose cheat sheet to help you remember the common commands to control an Ozone cluster running on top of Docker. -weight: 4 ---- - - - -In the `compose` directory of the ozone distribution there are multiple pseudo-cluster setup which -can be used to run Ozone in different way (for example: secure cluster, with tracing enabled, -with prometheus etc.). - -If the usage is not document in a specific directory the default usage is the following: - -```bash -cd compose/ozone -docker-compose up -d -``` - -The data of the container is ephemeral and deleted together with the docker volumes. -```bash -docker-compose down -``` - -## Useful Docker & Ozone Commands - -If you make any modifications to ozone, the simplest way to test it is to run freon and unit tests. - -Here are the instructions to run freon in a docker-based cluster. - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} - -This will open a bash shell on the data node container. -Now we can execute freon for load generation. - -{{< highlight bash >}} -ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10 -{{< /highlight >}} - -Here is a set of helpful commands for working with docker for ozone. -To check the status of the components: - -{{< highlight bash >}} -docker-compose ps -{{< /highlight >}} - -To get logs from a specific node/service: - -{{< highlight bash >}} -docker-compose logs scm -{{< /highlight >}} - - -As the WebUI ports are forwarded to the external machine, you can check the web UI: - -* For the Storage Container Manager: http://localhost:9876 -* For the Ozone Manager: http://localhost:9874 -* For the Datanode: check the port with `docker ps` (as there could be multiple data nodes, ports are mapped to the ephemeral port range) - -You can start multiple data nodes with: - -{{< highlight bash >}} -docker-compose scale datanode=3 -{{< /highlight >}} - -You can test the commands from the [Ozone CLI]({{< ref "shell/_index.md" >}}) after opening a new bash shell in one of the containers: - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} diff --git a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.zh.md b/hadoop-hdds/docs/content/beyond/DockerCheatSheet.zh.md deleted file mode 100644 index 0a37f9ba0714..000000000000 --- a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.zh.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Docker 速查表" -date: 2017-08-10 -summary: Docker Compose 速查表帮助你记住一些操作在 Docker 上运行的 Ozone 集群的常用命令。 -weight: 4 ---- - - - -Ozone 发行包中的 `compose` 目录包含了多种伪集群配置,可以用来以多种方式运行 Ozone(比如:安全集群,启用追踪功能,启用 prometheus 等)。 - -如果目录下没有额外的使用说明,默认的用法如下: - -```bash -cd compose/ozone -docker-compose up -d -``` - -容器中的数据没有持久化,在集群关闭时会和 docker 卷一起被删除。 -```bash -docker-compose down -``` - -## Docker 和 Ozone 实用命令 - -如果你对 Ozone 做了修改,最简单的测试方法是运行 freon 和单元测试。 - -下面是在基于 docker 的集群中运行 freon 的命令。 - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} - -这会在数据节点的容器中打开一个 bash shell,接下来我们执行 freon 来生成负载。 - -{{< highlight bash >}} -ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10 -{{< /highlight >}} - -下面是一些与 docker 有关的实用命令。 -检查各组件的状态: - -{{< highlight bash >}} -docker-compose ps -{{< 
/highlight >}} - -获取指定节点/服务中的日志: - -{{< highlight bash >}} -docker-compose logs scm -{{< /highlight >}} - - -因为 WebUI 的端口已经被转发到外部机器,你可以查看 web UI: - -* 对于 Storage Container Manager:http://localhost:9876 -* 对于 Ozone Manager:http://localhost:9874 -* 对于 数据节点:使用 `docker ps` 查看端口(因为可能会有多个数据节点,它们的端口被映射到一个临时的端口) - -你也可以启动多个数据节点: - -{{< highlight bash >}} -docker-compose scale datanode=3 -{{< /highlight >}} - -在一个容器中打开 bash shell 后,你也可以对 [Ozone 命令行接口]({{< ref "shell/_index.zh.md" >}})中的命令进行测试。 - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} diff --git a/hadoop-hdds/docs/content/beyond/_index.md b/hadoop-hdds/docs/content/beyond/_index.md deleted file mode 100644 index 2a29a5810aab..000000000000 --- a/hadoop-hdds/docs/content/beyond/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Beyond Basics" -date: "2017-10-10" -menu: main -weight: 7 - ---- - - -{{}} - Beyond Basics pages go into custom configurations of Ozone, including how - to run Ozone concurrently with an existing HDFS cluster. These pages also - take deep into how to run profilers and leverage tracing support built into - Ozone. -{{}} diff --git a/hadoop-hdds/docs/content/beyond/_index.zh.md b/hadoop-hdds/docs/content/beyond/_index.zh.md deleted file mode 100644 index b7f6775674e2..000000000000 --- a/hadoop-hdds/docs/content/beyond/_index.zh.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "进阶" -date: "2017-10-10" -menu: main -weight: 7 - ---- - - -{{}} - 本部分介绍 Ozone 的自定义配置,包括如何将 Ozone 以并存的方式部署到已有的 HDFS 集群,以及如何运行 Ozone 内置的 profilers 和 tracing 功能。 -{{}} diff --git a/hadoop-hdds/docs/content/concept/Containers.md b/hadoop-hdds/docs/content/concept/Containers.md new file mode 100644 index 000000000000..4e46acc5a280 --- /dev/null +++ b/hadoop-hdds/docs/content/concept/Containers.md @@ -0,0 +1,47 @@ +--- +title: Containers +weight: 5 +menu: + main: + parent: Architecture +summary: Description of the Containers, the replication unit of Ozone. +--- + + + +Containers are the fundamental replication unit of Ozone/HDDS, they are managed by the Storage Container Manager (SCM) service. + +Containers are big binary units (5Gb by default) which can contain multiple blocks: + +![Containers](Containers.png) + +Blocks are local information and not managed by SCM. Therefore even if billions of small files are created in the system (which means billions of blocks are created), only of the status of the containers will be reported by the Datanodes and containers will be replicated. + +When Ozone Manager requests a new Block allocation from the SCM, SCM will identify the suitable container and generate a block id which contains `ContainerId` + `LocalId`. Client will connect to the Datanode which stores the Container, and datanode can manage the separated block based on the `LocalId`. + +## Open vs. Closed containers + +When a container is created it starts in an OPEN state. When it's full (~5GB data is written), container will be closed and becomes a CLOSED container. 
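The ~5 GB threshold mentioned above is driven by the `ozone.scm.container.size` setting (its default is listed in the Storage Container Manager configuration table elsewhere in this patch). A minimal, illustrative `ozone-site.xml` fragment, assuming the default value, could look like this:

```XML
<!-- illustrative sketch: 5GB is the default container size;
     changing it changes when an OPEN container gets closed -->
<property>
  <name>ozone.scm.container.size</name>
  <value>5GB</value>
</property>
```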
+ +The fundamental differences between OPEN and CLOSED containers: + +OPEN | CLOSED +-----------------------------------|----------------------------------------- +mutable | immutable +replicated with RAFT (Ratis) | Replicated with async container copy +Raft leader is used to READ / WRITE | All the nodes can be used to READ diff --git a/hadoop-hdds/docs/content/concept/Containers.png b/hadoop-hdds/docs/content/concept/Containers.png new file mode 100644 index 000000000000..3d2df0f313bc Binary files /dev/null and b/hadoop-hdds/docs/content/concept/Containers.png differ diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md index f7b27297eda5..1910d6c6b53b 100644 --- a/hadoop-hdds/docs/content/concept/Datanodes.md +++ b/hadoop-hdds/docs/content/concept/Datanodes.md @@ -1,7 +1,10 @@ --- title: "Datanodes" date: "2017-09-14" -weight: 4 +weight: 7 +menu: + main: + parent: Architecture summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, You can use S3 clients and S3 SDK based applications without any modifications with Ozone. --- - -Storage container manager provides multiple critical functions for the Ozone -cluster. SCM acts as the cluster manager, Certificate authority, Block -manager and the Replica manager. - -{{}} -SCM is in charge of creating an Ozone cluster. When an SCM is booted up via init command, SCM creates the cluster identity and root certificates needed for the SCM certificate authority. SCM manages the life cycle of a data node in the cluster. -{{}} - -{{}} -SCM's Ceritificate authority is in -charge of issuing identity certificates for each and every -service in the cluster. This certificate infrastructre makes -it easy to enable mTLS at network layer and also the block -token infrastructure depends on this certificate infrastructure. -{{}} - -{{}} -SCM is the block manager. SCM -allocates blocks and assigns them to data nodes. Clients -read and write these blocks directly. -{{}} - - -{{}} -SCM keeps track of all the block -replicas. If there is a loss of data node or a disk, SCM -detects it and instructs data nodes make copies of the -missing blocks to ensure high avialablity. -{{}} diff --git a/hadoop-hdds/docs/content/concept/Overview.md b/hadoop-hdds/docs/content/concept/Overview.md index 23fcda2325ae..f478734124ec 100644 --- a/hadoop-hdds/docs/content/concept/Overview.md +++ b/hadoop-hdds/docs/content/concept/Overview.md @@ -2,6 +2,11 @@ title: Overview date: "2017-10-10" weight: 1 +menu: + main: + name: "ArchitectureOverview" + title: "Overview" + parent: Architecture summary: Ozone's overview and components that make up Ozone. --- @@ -29,7 +34,7 @@ scale to billions of objects. Ozone separates namespace management and block space management; this helps ozone to scale much better. The namespace is managed by a daemon called [Ozone Manager ]({{< ref "OzoneManager.md" >}}) (OM), and block space is -managed by [Storage Container Manager]({{< ref "Hdds.md" >}}) (SCM). +managed by [Storage Container Manager]({{< ref "StorageContainerManager.md" >}}) (SCM). Ozone consists of volumes, buckets, and keys. 
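To make the volume/bucket/key hierarchy described in the Overview concrete, here is a short command-line sketch; the names `vol1`, `bucket1`, `key1` and the local file paths are made up for illustration:

```bash
# create a volume, a bucket inside it, and upload a local file as a key
ozone sh volume create /vol1
ozone sh bucket create /vol1/bucket1
ozone sh key put /vol1/bucket1/key1 ./local-file.txt

# read the key back and list the bucket contents
ozone sh key get /vol1/bucket1/key1 ./copy-of-file.txt
ozone sh key list /vol1/bucket1
```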
diff --git a/hadoop-hdds/docs/content/concept/Overview.zh.md b/hadoop-hdds/docs/content/concept/Overview.zh.md index de16738a423c..042651ed1b2f 100644 --- a/hadoop-hdds/docs/content/concept/Overview.zh.md +++ b/hadoop-hdds/docs/content/concept/Overview.zh.md @@ -24,7 +24,7 @@ summary: 介绍 Ozone 的整体和各个组件。 Ozone 是一个分布式、多副本的对象存储系统,并针对大数据场景进行了专门的优化。Ozone 主要围绕可扩展性进行设计,目标是十亿数量级以上的对象存储。 -Ozone 通过对命名空间与块空间的管理进行分离,大大增加了其可扩展性,其中命名空间由 [Ozone Manager ]({{< ref "OzoneManager.zh.md" >}})(OM)管理,块空间由 [Storage Container Manager] ({{< ref "Hdds.zh.md" >}})(SCM)管理。 +Ozone 通过对命名空间与块空间的管理进行分离,大大增加了其可扩展性,其中命名空间由 [Ozone Manager ]({{< ref "OzoneManager.zh.md" >}})(OM)管理,块空间由 [Storage Container Manager] ({{< ref "StorageContainerManager.zh.md" >}})(SCM)管理。 Ozone 的管理由卷、桶和键组成。卷类似于个人主目录,只有管理员可以创建。 diff --git a/hadoop-hdds/docs/content/concept/OzoneManager-ReadPath.png b/hadoop-hdds/docs/content/concept/OzoneManager-ReadPath.png new file mode 100644 index 000000000000..5e68f6fc1cd6 Binary files /dev/null and b/hadoop-hdds/docs/content/concept/OzoneManager-ReadPath.png differ diff --git a/hadoop-hdds/docs/content/concept/OzoneManager-WritePath.png b/hadoop-hdds/docs/content/concept/OzoneManager-WritePath.png new file mode 100644 index 000000000000..924b61c31a23 Binary files /dev/null and b/hadoop-hdds/docs/content/concept/OzoneManager-WritePath.png differ diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md index 1ebdd4951d20..f0711ed21d0d 100644 --- a/hadoop-hdds/docs/content/concept/OzoneManager.md +++ b/hadoop-hdds/docs/content/concept/OzoneManager.md @@ -2,6 +2,9 @@ title: "Ozone Manager" date: "2017-09-14" weight: 2 +menu: + main: + parent: Architecture summary: Ozone Manager is the principal name space service of Ozone. OM manages the life cycle of volumes, buckets and Keys. --- +![Ozone Manager](OzoneManager.png) + Ozone Manager (OM) is the namespace manager for Ozone. This means that when you want to write some data, you ask Ozone @@ -55,6 +60,8 @@ understood if we trace what happens during a key write and key read. ### Key Write +![Write Path](OzoneManager-WritePath.png) + * To write a key to Ozone, a client tells Ozone manager that it would like to write a key into a bucket that lives inside a specific volume. Once Ozone Manager determines that you are allowed to write a key to the specified bucket, @@ -73,15 +80,65 @@ to the client. the block and writes data to the data node. * Once the write is complete on the data node, the client will update the block -information on -Ozone manager. - +information on Ozone manager. ### Key Reads +![Read Path](OzoneManager-ReadPath.png) + * Key reads are simpler, the client requests the block list from the Ozone Manager * Ozone manager will return the block list and block tokens which allows the client to read the data from data nodes. * Client connects to the data node and presents the block token and reads the data from the data node. + +## Main components of the Ozone Manager + +For a detailed view of Ozone Manager this section gives a quick overview about the provided network services and the stored persisted data. + +**Network services provided by Ozone Manager:** + +Ozone provides a network service for the client and for administration commands. 
The main service calls + + * Key, Bucket, Volume / CRUD + * Multipart upload (Initiate, Complete…) + * Supports upload of huge files in multiple steps + * FS related calls (optimized for hierarchical queries instead of a flat ObjectStore namespace) + * GetFileStatus, CreateDirectory, CreateFile, LookupFile + * ACL related + * Managing ACLs if [internal ACLs]({{< ref "security/SecurityAcls.md" >}}) are used instead of [Ranger]({{< ref "security/SecurityWithRanger.md" >}}) + * Delegation token (Get / Renew / Cancel) + * For security + * Admin APIs + * Get S3 secret + * ServiceList (used for service discovery) + * DBUpdates (used by [Recon]({{< ref "feature/Recon.md" >}}) downloads snapshots) + +**Persisted state** + +The following data is persisted in Ozone Manager side in a specific RocksDB directory: + + * Volume / Bucket / Key tables + * This is the main responsibility of OM + * Key metadata contains the block id (which includes container id) to find the data + * OpenKey table + * for keys which are created, but not yet committed + * Delegation token table + * for security + * PrefixInfo table + * specific index table to store directory level ACL and to provide better performance for hierarchical queries + * S3 secret table + * For S# secret management + * Multipart info table + * Inflight uploads should be tracked + * Deleted table + * To track the blocks which should be deleted from the datanodes + +## Notable configuration + +key | default | description | +----|-------------|-------- +ozone.om.address | 0.0.0.0:9862 | RPC address of the OM. Required by the client. +ozone.om.http-address | 0.0.0.0:9874 | Default port of the HTTP server. +ozone.metadata.dirs | none | Directory to store persisted data (RocksDB). diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.png b/hadoop-hdds/docs/content/concept/OzoneManager.png new file mode 100644 index 000000000000..f71bfacc4121 Binary files /dev/null and b/hadoop-hdds/docs/content/concept/OzoneManager.png differ diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.zh.md b/hadoop-hdds/docs/content/concept/OzoneManager.zh.md index 5e9ab7f23d0e..27b33c5fe8db 100644 --- a/hadoop-hdds/docs/content/concept/OzoneManager.zh.md +++ b/hadoop-hdds/docs/content/concept/OzoneManager.zh.md @@ -21,6 +21,12 @@ summary: Ozone Manager 是 Ozone 主要的命名空间服务,它管理了卷 limitations under the License. --> +
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
+ Ozone Manager(OM)管理 Ozone 的命名空间。 当向 Ozone 写入数据时,你需要向 OM 请求一个块,OM 会返回一个块并记录下相关信息。当你想要读取那个文件时,你也需要先通过 OM 获取那个块的地址。 diff --git a/hadoop-hdds/docs/content/concept/StorageContainerManager.md b/hadoop-hdds/docs/content/concept/StorageContainerManager.md new file mode 100644 index 000000000000..c0a7c0bd9ee8 --- /dev/null +++ b/hadoop-hdds/docs/content/concept/StorageContainerManager.md @@ -0,0 +1,99 @@ +--- +title: "Storage Container Manager" +date: "2017-09-14" +weight: 3 +menu: + main: + parent: Architecture +summary: Storage Container Manager or SCM is the core metadata service of Ozone. SCM provides a distributed block layer for Ozone. +--- + + +Storage Container Manager (SCM) is the leader node of the *block space management*. The main responsibility is to create and manage [containers]({{}}) which is the main replication unit of Ozone. + + +![Storage Container Manager](StorageContainerManager.png) + +## Main responsibilities + +Storage container manager provides multiple critical functions for the Ozone +cluster. SCM acts as the cluster manager, Certificate authority, Block +manager and the Replica manager. + +SCM is in charge of creating an Ozone cluster. When an SCM is booted up via `init` command, SCM creates the cluster identity and root certificates needed for the SCM certificate authority. SCM manages the life cycle of a data node in the cluster. + + 1. SCM is the block manager. SCM +allocates blocks and assigns them to data nodes. Clients +read and write these blocks directly. + + 2. SCM keeps track of all the block +replicas. If there is a loss of data node or a disk, SCM +detects it and instructs data nodes make copies of the +missing blocks to ensure high availability. + + 3. **SCM's Certificate Authority** is in +charge of issuing identity certificates for each and every +service in the cluster. This certificate infrastructure makes +it easy to enable mTLS at network layer and the block +token infrastructure depends on this certificate infrastructure. + +## Main components + +For a detailed view of Storage Container Manager this section gives a quick overview about the provided network services and the stored persisted data. + +**Network services provided by Storage Container Manager:** + + * Pipelines: List/Delete/Activate/Deactivate + * pipelines are set of datanodes to form replication groups + * Raft groups are planned by SCM + * Containers: Create / List / Delete containers + * Admin related requests + * Safemode status/modification + * Replication manager start / stop + * CA authority service + * Required by other sever components + * Datanode HeartBeat protocol + * From Datanode to SCM (30 sec by default) + * Datanodes report the status of containers, node... + * SCM can add commands to the response + +Note: client doesn't connect directly to the SCM + +**Persisted state** + + +The following data is persisted in Storage Container Manager side in a specific RocksDB directory + + * Pipelines + * Replication group of servers. Maintained to find a group for new container/block allocations. + * Containers + * Containers are the replication units. Data is required to act in case of data under/over replicated. + * Deleted blocks + * Block data is deleted in the background. Need a list to follow the progress. 
+ * Valid cert, Revoked certs + * Used by the internal Certificate Authority to authorize other Ozone services + +## Notable configuration + +key | default | description | +----|-------------|-------- +ozone.scm.container.size | 5GB | Default container size used by Ozone +ozone.scm.block.size | 256MB | The default size of a data block. +hdds.scm.safemode.min.datanode | 1 | Minimum number of datanodes to start the real work. +ozone.scm.http-address | 0.0.0.0:9876 | HTTP address of the SCM server +ozone.metadata.dirs | none | Directory to store persisted data (RocksDB). \ No newline at end of file diff --git a/hadoop-hdds/docs/content/concept/StorageContainerManager.png b/hadoop-hdds/docs/content/concept/StorageContainerManager.png new file mode 100644 index 000000000000..605c48c355f8 Binary files /dev/null and b/hadoop-hdds/docs/content/concept/StorageContainerManager.png differ diff --git a/hadoop-hdds/docs/content/concept/Hdds.zh.md b/hadoop-hdds/docs/content/concept/StorageContainerManager.zh.md similarity index 93% rename from hadoop-hdds/docs/content/concept/Hdds.zh.md rename to hadoop-hdds/docs/content/concept/StorageContainerManager.zh.md index d53090646cc0..da29869808c7 100644 --- a/hadoop-hdds/docs/content/concept/Hdds.zh.md +++ b/hadoop-hdds/docs/content/concept/StorageContainerManager.zh.md @@ -21,6 +21,12 @@ summary: Storage Container Manager(SCM)是 Ozone 的核心元数据服务 limitations under the License. --> +
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
+ SCM 为 Ozone 集群提供了多种重要功能,包括:集群管理、证书管理、块管理和副本管理等。 {{}} diff --git a/hadoop-hdds/docs/content/concept/_index.md b/hadoop-hdds/docs/content/concept/_index.md index 8f0aeb07c965..1441b00f2115 100644 --- a/hadoop-hdds/docs/content/concept/_index.md +++ b/hadoop-hdds/docs/content/concept/_index.md @@ -1,8 +1,8 @@ --- -title: Concepts +title: "Architecture" date: "2017-10-10" menu: main -weight: 6 +weight: 3 --- diff --git a/hadoop-hdds/docs/content/design/ec.md b/hadoop-hdds/docs/content/design/ec.md new file mode 100644 index 000000000000..415796d57597 --- /dev/null +++ b/hadoop-hdds/docs/content/design/ec.md @@ -0,0 +1,39 @@ +--- +title: Erasure Coding in Ozone +summary: Use Erasure Coding algorithm for efficient storage +date: 2020-06-30 +jira: HDDS-3816 +status: draft +author: Uma Maheswara Rao Gangumalla, Marton Elek, Stephen O'Donnell +--- + + +# Abstract + + Support Erasure Coding for read and write pipeline of Ozone. + +# Status + + The design doc describes two main methods to implement EC: + + * Container level, async Erasure Coding, to encode closed containers in the background + * Block level, striped Erasure Coding + + Second option can work only with new, dedicated write-path. Details of possible implementation will be included in the next version. + +# Link + + https://issues.apache.org/jira/secure/attachment/13006245/Erasure%20Coding%20in%20Apache%20Hadoop%20Ozone.pdf + diff --git a/hadoop-hdds/docs/content/design/multiraft.md b/hadoop-hdds/docs/content/design/multiraft.md index bccaff36f4ef..f9f978a98189 100644 --- a/hadoop-hdds/docs/content/design/multiraft.md +++ b/hadoop-hdds/docs/content/design/multiraft.md @@ -4,7 +4,7 @@ summary: Datanodes can be part of multiple independent RAFT groups / pipelines date: 2019-05-21 jira: HDDS-1564 status: implemented -author: +author: Li Cheng, Sammi Chen --- +# Abstract + +Proposal suggest to introduce a new storage-class abstraction which can be used to define different replication strategies (factor, type, ...) for different bucket/keys. + +# Link + +https://hackmd.io/4kxufJBOQNaKn7PKFK_6OQ?view diff --git a/hadoop-hdds/docs/content/design/topology.md b/hadoop-hdds/docs/content/design/topology.md new file mode 100644 index 000000000000..edd5a90662ea --- /dev/null +++ b/hadoop-hdds/docs/content/design/topology.md @@ -0,0 +1,29 @@ +--- +title: Topology-awareness +summary: Placement policy to use rack information for read and write +date: 2018-11-16 +jira: HDDS-698 +status: implemented +author: junping, xiaoyu, junjie, jitendra, anu, nicholas +--- + + +# Abstract + + Adjust read/write path to consider rack information for proper data placement. + +# Link + + * https://docs.google.com/document/d/1HsZqlBcEmlezU6HriUaIOFE9SFdcBoaiz15Qt_ng0P8/edit \ No newline at end of file diff --git a/hadoop-hdds/docs/content/design/trash.md b/hadoop-hdds/docs/content/design/trash.md index 78e077a31a14..b936aaecfb45 100644 --- a/hadoop-hdds/docs/content/design/trash.md +++ b/hadoop-hdds/docs/content/design/trash.md @@ -22,4 +22,9 @@ author: Matthew Sharp The design doc is uploaded to the JIRA: -https://issues.apache.org/jira/secure/attachment/12985273/Ozone_Trash_Feature.docx \ No newline at end of file +https://issues.apache.org/jira/secure/attachment/12985273/Ozone_Trash_Feature.docx + +## Special note + +Trash is disabled for both o3fs and ofs even if `fs.trash.interval` is set +on purpose. 
(HDDS-3982) diff --git a/hadoop-hdds/docs/content/design/ozone-volume-management.md b/hadoop-hdds/docs/content/design/volume-management.md similarity index 100% rename from hadoop-hdds/docs/content/design/ozone-volume-management.md rename to hadoop-hdds/docs/content/design/volume-management.md diff --git a/hadoop-hdds/docs/content/feature/GDPR.md b/hadoop-hdds/docs/content/feature/GDPR.md new file mode 100644 index 000000000000..47424844d946 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/GDPR.md @@ -0,0 +1,80 @@ +--- +title: "GDPR in Ozone" +date: "2019-September-17" +weight: 4 +summary: GDPR in Ozone +icon: user +menu: + main: + parent: Features +summary: Support to implement the "Right to be Forgotten" requirement of GDPR +--- + +--- + + +The General Data Protection Regulation (GDPR) is a law that governs how personal data should be handled. +This is an European Union law, but due to the nature of software oftentimes spills into other geographies. + +**Ozone supports GDPR's Right to Erasure(Right to be Forgotten) feature** + +When GDPR support is enabled all the keys are encrypt, by default. The encryption key is stored on the metadata server and used to encrypt the data for each of the requests. + +In case of a key deletion, Ozone deletes the metadata immediately but the binary data is deleted at the background in an async way. With GDPR support enabled, the encryption key is deleted immediately and as is, the data won't be possible to read any more even if the related binary (blocks or containers) are not yet deleted by the background process). + +Once you create a GDPR compliant bucket, any key created in that bucket will +automatically be GDPR compliant. + +Enabling GDPR compliance in Ozone is very straight forward. During bucket +creation, you can specify `--enforcegdpr=true` or `-g=true` and this will +ensure the bucket is GDPR compliant. Thus, any key created under this bucket +will automatically be GDPR compliant. + +GDPR can only be enabled on a new bucket. For existing buckets, you would +have to create a new GDPR compliant bucket and copy data from old bucket into + new bucket to take advantage of GDPR. + +Example to create a GDPR compliant bucket: + +```shell +ozone sh bucket create --enforcegdpr=true /hive/jan + +ozone sh bucket create -g=true /hive/jan +``` + +If you want to create an ordinary bucket then you can skip `--enforcegdpr` +and `-g` flags. + +## References + + * [Design doc]({{< ref "design/gdpr.md" >}}) diff --git a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.zh.md b/hadoop-hdds/docs/content/feature/GDPR.zh.md similarity index 91% rename from hadoop-hdds/docs/content/gdpr/GDPR in Ozone.zh.md rename to hadoop-hdds/docs/content/feature/GDPR.zh.md index e44957f537be..af0684dcfe08 100644 --- a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.zh.md +++ b/hadoop-hdds/docs/content/feature/GDPR.zh.md @@ -22,6 +22,11 @@ icon: user limitations under the License. --> +
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
在 Ozone 中遵守 GDPR 规范非常简单,只需要在创建桶时指定 `--enforcegdpr=true` 或 `-g=true` 参数,这样创建出的桶都是符合 GDPR 规范的,当然,在桶中创建的键也都自动符合。 diff --git a/hadoop-hdds/docs/content/feature/HA-OM-doublebuffer.png b/hadoop-hdds/docs/content/feature/HA-OM-doublebuffer.png new file mode 100644 index 000000000000..a71adce40a63 Binary files /dev/null and b/hadoop-hdds/docs/content/feature/HA-OM-doublebuffer.png differ diff --git a/hadoop-hdds/docs/content/feature/HA-OM.png b/hadoop-hdds/docs/content/feature/HA-OM.png new file mode 100644 index 000000000000..b1ff506f7860 Binary files /dev/null and b/hadoop-hdds/docs/content/feature/HA-OM.png differ diff --git a/hadoop-hdds/docs/content/feature/HA.md b/hadoop-hdds/docs/content/feature/HA.md new file mode 100644 index 000000000000..3e683f94f378 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/HA.md @@ -0,0 +1,115 @@ +--- +title: "High Availability" +weight: 1 +menu: + main: + parent: Features +summary: HA setup for Ozone to avoid any single point of failure. +--- + +Ozone has two leader nodes (*Ozone Manager* for key space management and *Storage Container Manager* for block space management) and storage nodes (Datanode). Data is replicated between the datanodes with the help of the RAFT consensus algorithm. + +To avoid any single point of failure, the leader nodes should also have an HA setup. + + 1. HA of Ozone Manager is implemented with the help of RAFT (Apache Ratis) + 2. HA of Storage Container Manager is [under implementation]({{< ref "scmha.md">}}) + +## Ozone Manager HA + +A single Ozone Manager uses [RocksDB](https://github.com/facebook/rocksdb/) to persist metadata (volumes, buckets, keys) locally. The HA version of Ozone Manager does exactly the same, but all the data is replicated with the help of the RAFT consensus algorithm to the follower Ozone Manager instances. + +![OM HA](HA-OM.png) + +The client connects to the leader Ozone Manager, which processes the request and schedules the replication with RAFT. When the request is replicated to the followers, the leader can return with the response. + +## Configuration + +HA mode of Ozone Manager can be enabled with the following settings in `ozone-site.xml`: + +```XML +<property> +   <name>ozone.om.ratis.enable</name> +   <value>true</value> +</property> +``` +One Ozone configuration (`ozone-site.xml`) can support multiple Ozone HA clusters. To select between the available HA clusters, a logical name is required for each of the clusters, which can be resolved to the IP addresses (and domain names) of the Ozone Managers. + +This logical name is called `serviceId` and can be configured in `ozone-site.xml`: + +```XML +<property> +   <name>ozone.om.service.ids</name> +   <value>cluster1,cluster2</value> +</property> +``` + +For each defined `serviceId`, a logical configuration name should be defined for each of the servers: + +```XML +<property> +   <name>ozone.om.nodes.cluster1</name> +   <value>om1,om2,om3</value> +</property> +``` + +The defined prefixes can be used to define the address of each of the OM services: + +```XML +<property> +   <name>ozone.om.address.cluster1.om1</name> +   <value>host1</value> +</property> +<property> +   <name>ozone.om.address.cluster1.om2</name> +   <value>host2</value> +</property> +<property> +   <name>ozone.om.address.cluster1.om3</name> +   <value>host3</value> +</property> +``` + +The defined `serviceId` can be used instead of a single OM host in the [client interfaces]({{< ref "interface/_index.md" >}}). + +For example, with `o3fs://`: + +```shell +hdfs dfs -ls o3fs://bucket.volume.cluster1/prefix/ +``` + +Or with `ofs://`: + +```shell +hdfs dfs -ls ofs://cluster1/volume/bucket/prefix/ +``` + +## Implementation details + +Raft can guarantee the replication of any request if the request is persisted to the RAFT log on the majority of the nodes.
To achieve high throughput, Ozone Manager returns with the response even if the request is persisted only to the RAFT logs. + +RocksDB instances are updated by a background thread which batches transactions (a so-called "double buffer": while one of the buffers is used to commit the data, the other one collects all the new requests for the next commit). To make all data available for the next request even if the background process has not yet written it, the key data is also cached in memory. + +![Double buffer](HA-OM-doublebuffer.png) + +The details of this approach are discussed in a separate [design doc]({{< ref "design/omha.md" >}}), but it is an integral part of the OM HA design. + +## References + + * Check [this page]({{< ref "design/omha.md" >}}) for the links to the original design docs + * The Ozone distribution contains an example OM HA configuration under the `compose/ozone-om-ha` directory, which can be tested with the help of [docker-compose]({{< ref "start/RunningViaDocker.md" >}}). \ No newline at end of file diff --git a/hadoop-hdds/docs/content/feature/Observability.md b/hadoop-hdds/docs/content/feature/Observability.md new file mode 100644 index 000000000000..2913abd4b125 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Observability.md @@ -0,0 +1,224 @@ +--- +title: "Observability" +weight: 8 +menu: + main: + parent: Features +summary: Different tools for Ozone to increase Observability +--- + +Ozone provides multiple tools to get more information about the current state of the cluster. + +## Prometheus + +Ozone has native Prometheus support. All internal metrics (collected by the Hadoop metrics framework) are published under the `/prom` HTTP context (for example under http://localhost:9876/prom for SCM). + +The Prometheus endpoint is turned on by default but can be turned off with the `hdds.prometheus.endpoint.enabled` configuration variable. + +In a secure environment the page is guarded with SPNEGO authentication, which is not supported by Prometheus. To enable monitoring in a secure environment, a specific authentication token can be configured. + +Example `ozone-site.xml`: + +```XML +<property> +   <name>hdds.prometheus.endpoint.token</name> +   <value>putyourtokenhere</value> +</property> +``` + +Example Prometheus configuration: + +```YAML +scrape_configs: + - job_name: ozone + bearer_token: putyourtokenhere + metrics_path: /prom + static_configs: + - targets: + - "127.0.0.1:9876" +``` + +## Distributed tracing + +Distributed tracing can help to understand performance bottlenecks by visualizing end-to-end performance. + +Ozone uses the [jaeger](https://jaegertracing.io) tracing library to collect traces, which can send the tracing data to any compatible backend (Zipkin, ...). + +Tracing is turned off by default, but can be turned on with `hdds.tracing.enabled` in `ozone-site.xml`: + +```XML +<property> +   <name>hdds.tracing.enabled</name> +   <value>true</value> +</property> +``` + +The Jaeger client can be configured with environment variables as documented [here](https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md). + +For example: + +```shell +JAEGER_SAMPLER_PARAM=0.01 +JAEGER_SAMPLER_TYPE=probabilistic +JAEGER_AGENT_HOST=jaeger +``` + +This configuration will record 1% of the requests to limit the performance overhead. For more information about Jaeger sampling, [check the documentation](https://www.jaegertracing.io/docs/1.18/sampling/#client-sampling-configuration). + +## ozone insight + +Ozone insight is a swiss-army-knife tool for checking the current state of an Ozone cluster. It can show logging, metrics and configuration for a particular component.
+ +To check the available components use `ozone insight list`: + +```shell +> ozone insight list + +Available insight points: + + scm.node-manager SCM Datanode management related information. + scm.replica-manager SCM closed container replication manager + scm.event-queue Information about the internal async event delivery + scm.protocol.block-location SCM Block location protocol endpoint + scm.protocol.container-location SCM Container location protocol endpoint + scm.protocol.security SCM Block location protocol endpoint + om.key-manager OM Key Manager + om.protocol.client Ozone Manager RPC endpoint + datanode.pipeline More information about one ratis datanode ring. +``` + +### Configuration + +`ozone insight config` can show configuration related to a specific component (supported only for selected components). + +```shell +> ozone insight config scm.replica-manager + +Configuration for `scm.replica-manager` (SCM closed container replication manager) + +>>> hdds.scm.replication.thread.interval + default: 300s + current: 300s + +There is a replication monitor thread running inside SCM which takes care of replicating the containers in the cluster. This property is used to configure the interval in which that thread runs. + + +>>> hdds.scm.replication.event.timeout + default: 30m + current: 30m + +Timeout for the container replication/deletion commands sent to datanodes. After this timeout the command will be retried. + +``` + +### Metrics + +`ozone insight metrics` can show metrics related to a specific component (supported only for selected components). + + +```shell +> ozone insight metrics scm.protocol.block-location +Metrics for `scm.protocol.block-location` (SCM Block location protocol endpoint) + +RPC connections + + Open connections: 0 + Dropped connections: 0 + Received bytes: 1267 + Sent bytes: 2420 + + +RPC queue + + RPC average queue time: 0.0 + RPC call queue length: 0 + + +RPC performance + + RPC processing time average: 0.0 + Number of slow calls: 0 + + +Message type counters + + Number of AllocateScmBlock: ??? + Number of DeleteScmKeyBlocks: ??? + Number of GetScmInfo: ??? + Number of SortDatanodes: ??? +``` + +### Logs + +`ozone insight logs` can connect to the required service and show the DEBUG/TRACE log related to one specific component. 
For example, to display RPC messages: + +```shell +>ozone insight logs om.protocol.client + +[OM] 2020-07-28 12:31:49,988 [DEBUG|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] OzoneProtocol ServiceList request is received +[OM] 2020-07-28 12:31:50,095 [DEBUG|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] OzoneProtocol CreateVolume request is received +``` + +Using the `-v` flag, the content of the protobuf message can also be displayed (TRACE level log): + +```shell +ozone insight logs -v om.protocol.client + +[OM] 2020-07-28 12:33:28,463 [TRACE|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] [service=OzoneProtocol] [type=CreateVolume] request is received: +cmdType: CreateVolume +traceID: "" +clientId: "client-A31DF5C6ECF2" +createVolumeRequest { + volumeInfo { + adminName: "hadoop" + ownerName: "hadoop" + volume: "vol1" + quotaInBytes: 1152921504606846976 + volumeAcls { + type: USER + name: "hadoop" + rights: "200" + aclScope: ACCESS + } + volumeAcls { + type: GROUP + name: "users" + rights: "200" + aclScope: ACCESS + } + creationTime: 1595939608460 + objectID: 0 + updateID: 0 + modificationTime: 0 + } +} + +[OM] 2020-07-28 12:33:28,474 [TRACE|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] [service=OzoneProtocol] [type=CreateVolume] request is processed. Response: +cmdType: CreateVolume +traceID: "" +success: false +message: "Volume already exists" +status: VOLUME_ALREADY_EXISTS +``` + + \ No newline at end of file diff --git a/hadoop-hdds/docs/content/shell/_index.md b/hadoop-hdds/docs/content/feature/Recon.md similarity index 52% rename from hadoop-hdds/docs/content/shell/_index.md rename to hadoop-hdds/docs/content/feature/Recon.md index 3cb1a9f61672..7234b809bc7b 100644 --- a/hadoop-hdds/docs/content/shell/_index.md +++ b/hadoop-hdds/docs/content/feature/Recon.md @@ -1,8 +1,10 @@ --- -title: Command Line Interface +title: "Recon" +weight: 7 menu: main: - weight: 3 + parent: Features +summary: Recon is the Web UI and analytics service for Ozone --- +Recon is the Web UI and analytics service for Ozone. It's an optional component, but strongly recommended as it can add additional visibility. -{{}} - Ozone shell is the primary interface to interact with Ozone. - It provides a command shell interface to work against Ozone. -{{}} +Recon collects all the data from an Ozone cluster and **stores** it in a SQL database for further analysis. + + 1. Ozone Manager data is downloaded in the background by an async process. RocksDB snapshots are created on the OM side periodically, and the incremental data is copied to Recon and processed. + 2. Datanodes can send heartbeats not just to SCM but also to Recon. Recon can be a read-only listener of the heartbeats and updates the local database based on the received information (see the configuration sketch below). \ No newline at end of file
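+To let datanodes report to Recon in addition to SCM, the Recon endpoints are set in `ozone-site.xml`. The snippet below is only a minimal sketch: the property names `ozone.recon.address` and `ozone.recon.http-address` and the default ports are assumptions based on common Recon defaults, not something defined on this page, so verify them against your release:
+
+```XML
+<!-- RPC endpoint the datanodes use to send their reports/heartbeats to Recon (assumed default port 9891) -->
+<property>
+   <name>ozone.recon.address</name>
+   <value>recon-host:9891</value>
+</property>
+<!-- HTTP address of the Recon web UI (assumed default port 9888) -->
+<property>
+   <name>ozone.recon.http-address</name>
+   <value>0.0.0.0:9888</value>
+</property>
+```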
diff --git a/hadoop-hdds/docs/content/feature/Topology.md b/hadoop-hdds/docs/content/feature/Topology.md new file mode 100644 index 000000000000..71c289c56d4a --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Topology.md @@ -0,0 +1,108 @@ +--- +title: "Topology awareness" +weight: 2 +menu: + main: + parent: Features +summary: Configuration for rack-awareness for improved read/write +--- + +Ozone can use topology-related information (for example rack placement) to optimize read and write pipelines. To get a fully rack-aware cluster, Ozone requires three different configurations. + + 1. The topology information should be configured in Ozone. + 2. Topology-related information should be used when Ozone chooses three different datanodes for a specific pipeline/container. (WRITE) + 3. When Ozone reads a key, it should prefer to read from the closest node. (READ) + + + +## Topology hierarchy + +The topology hierarchy can be configured using the `net.topology.node.switch.mapping.impl` configuration key. This configuration should define an implementation of `org.apache.hadoop.net.CachedDNSToSwitchMapping`. As this is a Hadoop class, the configuration is exactly the same as in Hadoop. + +### Static list + +A static list can be configured with the help of `TableMapping`: + +```XML +<property> +   <name>net.topology.node.switch.mapping.impl</name> +   <value>org.apache.hadoop.net.TableMapping</value> +</property> +<property> +   <name>net.topology.table.file.name</name> +   <value>/opt/hadoop/compose/ozone-topology/network-config</value> +</property> +``` + +The second configuration option should point to a text file. The file format is a two column text file, with columns separated by whitespace. The first column is a DNS or IP address and the second column specifies the rack the address maps to. If no entry corresponding to a host in the cluster is found, then `/default-rack` is assumed. + +### Dynamic list + +Rack information can be identified with the help of an external script: + + +```XML +<property> +   <name>net.topology.node.switch.mapping.impl</name> +   <value>org.apache.hadoop.net.ScriptBasedMapping</value> +</property> +<property> +   <name>net.topology.script.file.name</name> +   <value>/usr/local/bin/rack.sh</value> +</property> +``` + +If implementing an external script, it will be specified with the `net.topology.script.file.name` parameter in the configuration files. Unlike the Java class, the external topology script is not included with the Ozone distribution and is provided by the administrator. Ozone will send multiple IP addresses to ARGV when forking the topology script. The number of IP addresses sent to the topology script is controlled with `net.topology.script.number.args` and defaults to 100. If `net.topology.script.number.args` was changed to 1, a topology script would get forked for each IP submitted.
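+The script itself is provided by the administrator and not shipped with Ozone; what follows is only a minimal sketch, assuming a hypothetical IP-prefix-to-rack mapping (the subnets and rack names are placeholders to adapt). A Hadoop-style topology script receives one or more IP addresses or hostnames as arguments and prints one rack path per input:
+
+```bash
+#!/usr/bin/env bash
+# /usr/local/bin/rack.sh -- hypothetical example mapping, adjust to your network.
+# Prints one rack path per argument (Ozone may pass many addresses at once).
+for node in "$@"; do
+  case "$node" in
+    10.0.1.*) echo "/rack1" ;;
+    10.0.2.*) echo "/rack2" ;;
+    *)        echo "/default-rack" ;;
+  esac
+done
+```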
+ +## Write path + +Placement of the closed containers can be configured with the `ozone.scm.container.placement.impl` configuration key. The available container placement policies can be found in the `org.apache.hdds.scm.container.placement` [package](https://github.com/apache/hadoop-ozone/tree/master/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms). + +By default `SCMContainerPlacementRandom` is used. For topology-awareness, `SCMContainerPlacementRackAware` can be used: + +```XML +<property> +   <name>ozone.scm.container.placement.impl</name> +   <value>org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware</value> +</property> +``` + +This placement policy complies with the algorithm used in HDFS. With the default 3 replicas, two replicas will be on the same rack and the third one will be on a different rack. + +This implementation applies to a network topology like "/rack/node". It is not recommended if the network topology has more layers. + +## Read path + +Finally, the read path should also be configured to read the data from the closest pipeline: + +```XML +<property> +   <name>ozone.network.topology.aware.read</name> +   <value>true</value> +</property> +``` + +## References + + * Hadoop documentation about `net.topology.node.switch.mapping.impl`: https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/RackAwareness.html + * [Design doc]({{< ref "design/topology.md">}}) \ No newline at end of file diff --git a/hadoop-hdds/docs/content/gdpr/_index.md b/hadoop-hdds/docs/content/feature/_index.md similarity index 80% rename from hadoop-hdds/docs/content/gdpr/_index.md rename to hadoop-hdds/docs/content/feature/_index.md index 017206e9fbcd..2b30d3ddb37c 100644 --- a/hadoop-hdds/docs/content/gdpr/_index.md +++ b/hadoop-hdds/docs/content/feature/_index.md @@ -1,9 +1,8 @@ --- -title: GDPR -name: GDPR -identifier: gdpr +title: Features +name: Features menu: main -weight: 5 +weight: 4 --- - - -Enabling GDPR compliance in Ozone is very straight forward. During bucket -creation, you can specify `--enforcegdpr=true` or `-g=true` and this will -ensure the bucket is GDPR compliant. Thus, any key created under this bucket -will automatically be GDPR compliant. - -GDPR can only be enabled on a new bucket. For existing buckets, you would -have to create a new GDPR compliant bucket and copy data from old bucket into - new bucket to take advantage of GDPR. - -Example to create a GDPR compliant bucket: - -`ozone sh bucket create --enforcegdpr=true /hive/jan` - -`ozone sh bucket create -g=true /hive/jan` - -If you want to create an ordinary bucket then you can skip `--enforcegdpr` -and `-g` flags. \ No newline at end of file diff --git a/hadoop-hdds/docs/content/interface/CSI.md b/hadoop-hdds/docs/content/interface/CSI.md index b70572f77f5d..d1971c14b7f2 100644 --- a/hadoop-hdds/docs/content/interface/CSI.md +++ b/hadoop-hdds/docs/content/interface/CSI.md @@ -1,6 +1,9 @@ --- title: CSI Protocol -weight: 3 +weight: 6 +menu: + main: + parent: "Client Interfaces" summary: Ozone supports Container Storage Interface(CSI) protocol. You can use Ozone by mounting an Ozone volume by Ozone CSI. --- @@ -21,10 +24,18 @@ summary: Ozone supports Container Storage Interface(CSI) protocol. You can use O limitations under the License. --> -`Container Storage Interface` (CSI) will enable storage vendors (SP) to develop a plugin once and have it work across a number of container orchestration (CO) systems. +`Container Storage Interface` (CSI) will enable storage vendors (SP) to develop a plugin once and have it work across a number of container orchestration (CO) systems like Kubernetes or Yarn. To get more information about CSI at [SCI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md) +CSI defines a simple gRPC interface with 3 services (Identity, Controller, Node). It defines how the Container Orchestrator can request the creation of a new storage space or the mounting of the newly created storage, but it doesn't define how the storage should be mounted. + +![CSI](CSI.png) + +By default the Ozone CSI service uses an S3 FUSE driver ([goofys](https://github.com/kahing/goofys)) to mount the created Ozone bucket. Implementation of other mounting options, such as a dedicated NFS server or a native FUSE driver, is work in progress. + + + + Ozone CSI is an implementation of CSI, it can make possible of using Ozone as a storage volume for a container. ## Getting started @@ -35,13 +46,6 @@ through goofys. If you don't have an Ozone cluster on kubernetes, you can reference [Kubernetes]({{< ref "start/Kubernetes.md" >}}) to create one.
Use the resources from `kubernetes/examples/ozone` where you can find all the required Kubernetes resources to run cluster together with the dedicated Ozone CSI daemon (check `kubernetes/examples/ozone/csi`) -You should check if you already have a name of `/s3v` volume, if not create it by execute follow command: - -```bash -kubectl exec -it scm-0 bash -[hadoop@scm-0 ~]$ ozone sh vol create s3v -``` - Now, create the CSI related resources by execute the follow command. ```bash diff --git a/hadoop-hdds/docs/content/interface/CSI.png b/hadoop-hdds/docs/content/interface/CSI.png new file mode 100644 index 000000000000..38720c3019cf Binary files /dev/null and b/hadoop-hdds/docs/content/interface/CSI.png differ diff --git a/hadoop-hdds/docs/content/interface/CSI.zh.md b/hadoop-hdds/docs/content/interface/CSI.zh.md new file mode 100644 index 000000000000..23fea66ee9d6 --- /dev/null +++ b/hadoop-hdds/docs/content/interface/CSI.zh.md @@ -0,0 +1,89 @@ +--- +title: CSI 协议 +weight: 3 +summary: Ozone 支持 容器存储接口 (CSI) 协议。你可以通过 Ozone CSI 挂载 Ozone 桶的方式使用 Ozone。 +--- + + + +容器存储接口 `Container Storage Interface` (CSI) 使存储供应商(SP)能够一次性开发一个插件,并让它跨多个容器编排工作, +就像 Kubernetes 或者 YARN。 + +获取更多 CSI 的信息,可以参考[SCI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md) + +CSI 定义了一个简单的,包含3个接口(Identity, Controller, Node)的 GRPC 接口,它定义了容器编排器如何请求创建新的存储空间或挂载新创建的存储, +但没有定义如何挂载存储。 + +![CSI](CSI.png) + +默认情况下,Ozone CSI 服务使用 S3 FUSE 驱动程序([goofys](https://github.com/kahing/goofys))挂载 Ozone 桶。 +其他挂载方式(如专用 NFS 服务或本机FUSE驱动程序)的实现正在进行中。 + + + +Ozone CSI 是 CSI 的一种实现,它可以将 Ozone 用作容器的存储卷。 + +## 入门 + +首先,我们需要一个带有 s3gateway 的 Ozone 集群,并且它的 OM 和 s3gateway 的端口都可以对 CSI pod 可见, +因为 CSIServer 将会访问 OM 来创建或者删除桶,同时 CSIServer 通过 goofys 创建一个可以访问 s3g 的挂载点来发布卷。 + +如果你没有一个运行在 Kubernetes 上的 Ozone 集群,你可以参考[Kubernetes]({{< ref "start/Kubernetes.zh.md" >}}) 来创建一个。 +使用来自 `kubernetes/examples/ozone`的资源,你可以找到所有需要的 Kubernetes 资源来和指定的 CSI 运行在一起 +(参考 `kubernetes/examples/ozone/csi`) + +现在,使用如下命令,创建 CSI 相关的资源。 + +```bash +kubectl create -f /ozone/kubernetes/examples/ozone/csi +``` + +## 创建 pv-test 并查看结果 + +通过执行以下命令,创建 pv-test 相关的资源。 + +```bash +kubectl create -f /ozone/kubernetes/examples/ozone/pv-test +``` + +连接 pod scm-0 并在 /s3v/pvc* 桶中创建一个键值。 + +```bash +kubectl exec -it scm-0 bash +[hadoop@scm-0 ~]$ ozone sh bucket list s3v +{ + "metadata" : { }, + "volumeName" : "s3v", + "name" : "pvc-861e2d8b-2232-4cd1-b43c-c0c26697ab6b", + "storageType" : "DISK", + "versioning" : false, + "creationTime" : "2020-06-11T08:19:47.469Z", + "encryptionKeyName" : null +} +[hadoop@scm-0 ~]$ ozone sh key put /s3v/pvc-861e2d8b-2232-4cd1-b43c-c0c26697ab6b/A LICENSE.txt +``` + +现在,通过映射 `ozone-csi-test-webserver-7cbdc5d65c-h5mnn` 端口,我们可以使用浏览器展示其 UI 页面。 + +```bash +kubectl port-forward ozone-csi-test-webserver-7cbdc5d65c-h5mnn 8000:8000 +``` + +最终,我们可以通过 `http://localhost:8000/` 看到结果 + +![pvtest-webui](pvtest-webui.png) diff --git a/hadoop-hdds/docs/content/interface/Cli.md b/hadoop-hdds/docs/content/interface/Cli.md new file mode 100644 index 000000000000..d65d573c0074 --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Cli.md @@ -0,0 +1,208 @@ +--- +title: Command Line Interface +weight: 4 +menu: + main: + parent: "Client Interfaces" +--- + + + +Ozone shell is the primary interface to interact with Ozone from the command line. Behind the scenes it uses the [Java API]({{< ref "interface/JavaApi.md">}}). + + There are some functionality which couldn't be accessed without using `ozone sh` commands. For example: + + 1. Creating volumes with quota + 2. 
Managing internal ACLs + 3. Creating buckets with encryption key + +All of these are one-time, administration tasks. Applications can use Ozone without this CLI using other interface like Hadoop Compatible File System (o3fs or ofs) or S3 interface. + + +Ozone shell help can be invoked at _object_ level or at _action_ level. + +For example: + +```bash +ozone sh volume --help +``` + +will show all possible actions for volumes. + +Or it can be invoked to explain a specific action like: + +```bash +ozone sh volume create --help +``` + +which will print the command line options of the `create` command for volumes. + +## General Command Format + +Ozone shell commands take the following form: + +> _ozone sh object action url_ + +**ozone** script is used to invoke all Ozone sub-commands. The ozone shell is +invoked via ```sh``` command. + +Object can be volume, bucket or key. Actions are various verbs like +create, list, delete etc. + +Depending on the action, Ozone URL can point to a volume, bucket or key in the following format: + +_\[schema\]\[server:port\]/volume/bucket/key_ + + +Where, + +1. **Schema** - This should be `o3` which is the native RPC protocol to access + Ozone API. The usage of the schema is optional. + +2. **Server:Port** - This is the address of the Ozone Manager. If the port is +omitted the default port from ozone-site.xml will be used. + +Please see volume commands, bucket commands, and key commands section for more +detail. + +## Volume operations + +Volume is the top level element of the hierarchy, managed only by administrators. Optionally, quota and the owner user can be specified. + +Example commands: + +```shell +$ ozone sh volume create /vol1 +``` + +```shell +$ ozone sh volume info /vol1 +{ + "metadata" : { }, + "name" : "vol1", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-28T12:31:50.112Z", + "modificationTime" : "2020-07-28T12:31:50.112Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +} +``` + +```shell +$ ozone sh volume list / +{ + "metadata" : { }, + "name" : "s3v", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-27T11:32:22.314Z", + "modificationTime" : "2020-07-27T11:32:22.314Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +} +.... +``` +## Bucket operations + +Bucket is the second level of the object hierarchy, and is similar to AWS S3 buckets. Users can create buckets in volumes, if they have the necessary permissions. + +Command examples: + +```shell +$ ozone sh bucket create /vol1/bucket1 +```shell + +```shell +$ ozone sh bucket info /vol1/bucket1 +{ + "metadata" : { }, + "volumeName" : "vol1", + "name" : "bucket1", + "storageType" : "DISK", + "versioning" : false, + "creationTime" : "2020-07-28T13:14:45.091Z", + "modificationTime" : "2020-07-28T13:14:45.091Z", + "encryptionKeyName" : null, + "sourceVolume" : null, + "sourceBucket" : null +} +``` + +[Transparent Data Encryption]({{< ref "security/SecuringTDE.md" >}}) can be enabled at the bucket level. + +## Key operations + +Key is the object which can store the data. 
+ +```shell +$ ozone sh key put /vol1/bucket1/README.md README.md +``` + + + + + +```shell +$ ozone sh key info /vol1/bucket1/README.md +{ + "volumeName" : "vol1", + "bucketName" : "bucket1", + "name" : "README.md", + "dataSize" : 3841, + "creationTime" : "2020-07-28T13:17:20.749Z", + "modificationTime" : "2020-07-28T13:17:21.979Z", + "replicationType" : "RATIS", + "replicationFactor" : 1, + "ozoneKeyLocations" : [ { + "containerID" : 1, + "localID" : 104591670688743424, + "length" : 3841, + "offset" : 0 + } ], + "metadata" : { }, + "fileEncryptionInfo" : null +} +``` + +```shell +$ ozone sh key get /vol1/bucket1/README.md /tmp/ +``` diff --git a/hadoop-hdds/docs/content/interface/JavaApi.md b/hadoop-hdds/docs/content/interface/JavaApi.md index bb18068f4000..2a97922d7415 100644 --- a/hadoop-hdds/docs/content/interface/JavaApi.md +++ b/hadoop-hdds/docs/content/interface/JavaApi.md @@ -1,7 +1,10 @@ --- title: "Java API" date: "2017-09-14" -weight: 1 +weight: 5 +menu: + main: + parent: "Client Interfaces" summary: Ozone has a set of Native RPC based APIs. This is the lowest level API's on which all other protocols are built. This is the most performant and feature-full of all Ozone protocols. --- The Hadoop compatible file system interface allows storage backends like Ozone -to be easily integrated into Hadoop eco-system. Ozone file system is an -Hadoop compatible file system. Currently, Ozone supports two scheme: o3fs and ofs. -The biggest difference between the o3fs and ofs,is that o3fs supports operations -only at a single bucket, while ofs supports operations across all volumes and buckets. -you can Refer to "Differences from existing o3FS "in ofs.md for details of the differences. +to be easily integrated into Hadoop eco-system. Ozone file system is an +Hadoop compatible file system. + + ## Setting up the o3fs @@ -43,7 +52,7 @@ Once this is created, please make sure that bucket exists via the _list volume_ Please add the following entry to the core-site.xml. -{{< highlight xml >}} +```XML fs.AbstractFileSystem.o3fs.impl org.apache.hadoop.fs.ozone.OzFs @@ -52,7 +61,7 @@ Please add the following entry to the core-site.xml. fs.defaultFS o3fs://bucket.volume -{{< /highlight >}} +``` This will make this bucket to be the default Hadoop compatible file system and register the o3fs file system type. @@ -116,52 +125,3 @@ hdfs dfs -ls o3fs://bucket.volume.om-host.example.com:6789/key Note: Only port number from the config is used in this case, whereas the host name in the config `ozone.om.address` is ignored. -## Setting up the ofs -This is just a general introduction. For more detailed usage, you can refer to ofs.md. - -Please add the following entry to the core-site.xml. - -{{< highlight xml >}} - - fs.ofs.impl - org.apache.hadoop.fs.ozone.RootedOzoneFileSystem - - - fs.defaultFS - ofs://om-host.example.com/ - -{{< /highlight >}} - -This will make all the volumes and buckets to be the default Hadoop compatible file system and register the ofs file system type. - -You also need to add the ozone-filesystem-hadoop3.jar file to the classpath: - -{{< highlight bash >}} -export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH -{{< /highlight >}} - -(Note: with Hadoop 2.x, use the `hadoop-ozone-filesystem-hadoop2-*.jar`) - -Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc. -For example: - -{{< highlight bash >}} -hdfs dfs -ls / -{{< /highlight >}} - -Note that ofs works on all buckets and volumes. 
Users can create buckets and volumes using mkdir, such as create volume named volume1 and bucket named bucket1: - -{{< highlight bash >}} -hdfs dfs -mkdir /volume1 -hdfs dfs -mkdir /volume1/bucket1 -{{< /highlight >}} - - -Or use the put command to write a file to the bucket. - -{{< highlight bash >}} -hdfs dfs -put /etc/hosts /volume1/bucket1/test -{{< /highlight >}} - -For more usage, see: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf - diff --git a/hadoop-hdds/docs/content/interface/OzoneFS.zh.md b/hadoop-hdds/docs/content/interface/O3fs.zh.md similarity index 97% rename from hadoop-hdds/docs/content/interface/OzoneFS.zh.md rename to hadoop-hdds/docs/content/interface/O3fs.zh.md index 996991962c75..0b2a06f32181 100644 --- a/hadoop-hdds/docs/content/interface/OzoneFS.zh.md +++ b/hadoop-hdds/docs/content/interface/O3fs.zh.md @@ -21,6 +21,12 @@ summary: Hadoop 文件系统兼容使得任何使用类 HDFS 接口的应用无 limitations under the License. --> +
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
+ Hadoop 的文件系统接口兼容可以让任意像 Ozone 这样的存储后端轻松地整合进 Hadoop 生态系统,Ozone 文件系统就是一个兼容 Hadoop 的文件系统。 目前ozone支持两种协议: o3fs和ofs。两者最大的区别是o3fs只支持在单个bucket上操作,而ofs则支持跨所有volume和bucket的操作。关于两者在操作 上的具体区别可以参考ofs.md中的"Differences from existing o3fs"。 diff --git a/hadoop-hdds/docs/content/interface/Ofs.md b/hadoop-hdds/docs/content/interface/Ofs.md new file mode 100644 index 000000000000..fcc1467a7102 --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Ofs.md @@ -0,0 +1,227 @@ +--- +title: Ofs (Hadoop compatible) +date: 2017-09-14 +weight: 1 +menu: + main: + parent: "Client Interfaces" +summary: Hadoop Compatible file system allows any application that expects an HDFS like interface to work against Ozone with zero changes. Frameworks like Apache Spark, YARN and Hive work against Ozone without needing any change. **Global level view.** +--- + + +The Hadoop compatible file system interface allows storage backends like Ozone +to be easily integrated into Hadoop eco-system. Ozone file system is an +Hadoop compatible file system. + + + + +## The Basics + +Examples of valid OFS paths: + +``` +ofs://om1/ +ofs://om3:9862/ +ofs://omservice/ +ofs://omservice/volume1/ +ofs://omservice/volume1/bucket1/ +ofs://omservice/volume1/bucket1/dir1 +ofs://omservice/volume1/bucket1/dir1/key1 + +ofs://omservice/tmp/ +ofs://omservice/tmp/key1 +``` + +Volumes and mount(s) are located at the root level of an OFS Filesystem. +Buckets are listed naturally under volumes. +Keys and directories are under each buckets. + +Note that for mounts, only temp mount `/tmp` is supported at the moment. + +## Configuration + + +Please add the following entry to the core-site.xml. + +{{< highlight xml >}} + + fs.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + + + fs.defaultFS + ofs://om-host.example.com/ + +{{< /highlight >}} + +This will make all the volumes and buckets to be the default Hadoop compatible file system and register the ofs file system type. + +You also need to add the ozone-filesystem-hadoop3.jar file to the classpath: + +{{< highlight bash >}} +export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH +{{< /highlight >}} + +(Note: with Hadoop 2.x, use the `hadoop-ozone-filesystem-hadoop2-*.jar`) + +Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc. +For example: + +{{< highlight bash >}} +hdfs dfs -ls / +{{< /highlight >}} + +Note that ofs works on all buckets and volumes. Users can create buckets and volumes using mkdir, such as create volume named volume1 and bucket named bucket1: + +{{< highlight bash >}} +hdfs dfs -mkdir /volume1 +hdfs dfs -mkdir /volume1/bucket1 +{{< /highlight >}} + + +Or use the put command to write a file to the bucket. + +{{< highlight bash >}} +hdfs dfs -put /etc/hosts /volume1/bucket1/test +{{< /highlight >}} + +For more usage, see: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf + +## Special note + +Trash is disabled even if `fs.trash.interval` is set on purpose. (HDDS-3982) + +## Differences from [o3fs]({{< ref "interface/O3fs.md" >}}) + +### Creating files + +OFS doesn't allow creating keys(files) directly under root or volumes. +Users will receive an error message when they try to do that: + +``` +$ ozone fs -touch /volume1/key1 +touch: Cannot create file under root or volume. +``` + +### Simplify fs.defaultFS + +With OFS, fs.defaultFS (in core-site.xml) no longer needs to have a specific +volume and bucket in its path like o3fs did. 
+Simply put the OM host or service ID (in case of HA): + +``` +<property> +  <name>fs.defaultFS</name> +  <value>ofs://omservice</value> +</property> +``` + +The client would then be able to access every volume and bucket on the cluster +without specifying the hostname or service ID. + +``` +$ ozone fs -mkdir -p /volume1/bucket1 +``` + +### Volume and bucket management directly from FileSystem shell + +Admins can create and delete volumes and buckets easily with the Hadoop FS shell. +Volumes and buckets are treated similarly to directories, so they will be created +with `-p` if they don't exist: + +``` +$ ozone fs -mkdir -p ofs://omservice/volume1/bucket1/dir1/ +``` + +Note that the supported volume and bucket name character set rule still applies. +For instance, bucket and volume names don't take underscore (`_`): + +``` +$ ozone fs -mkdir -p /volume_1 +mkdir: Bucket or Volume name has an unsupported character : _ +``` + +## Mounts + +In order to be compatible with legacy Hadoop applications that use /tmp/, +we have a special temp mount located at the root of the FS. +This feature may be expanded in the future to support custom mount paths. + +Important: To use it, first, an **admin** needs to create the volume tmp +(the volume name is hardcoded for now) and set its ACL to world ALL access. +Namely: + +``` +$ ozone sh volume create tmp +$ ozone sh volume setacl tmp -al world::a +``` + +These commands only need to be done **once per cluster**. + +Then, **each user** needs to mkdir first to initialize their own temp bucket +once. + +``` +$ ozone fs -mkdir /tmp +2020-06-04 00:00:00,050 [main] INFO rpc.RpcClient: Creating Bucket: tmp/0238 ... +``` + +After that they can write to it just like they would to a regular +directory, e.g.: + +``` +$ ozone fs -touch /tmp/key1 +``` + +## Delete with trash enabled + +When keys are deleted with trash enabled, they are moved to a trash directory +under each bucket, because keys aren't allowed to be moved (renamed) between +buckets in Ozone. + +``` +$ ozone fs -rm /volume1/bucket1/key1 +2020-06-04 00:00:00,100 [main] INFO fs.TrashPolicyDefault: Moved: 'ofs://id1/volume1/bucket1/key1' to trash at: ofs://id1/volume1/bucket1/.Trash/hadoop/Current/volume1/bucket1/key1 +``` + +This is very similar to how the HDFS encryption zone handles trash location. + +## Recursive listing + +OFS supports recursive volume, bucket and key listing. + +i.e. `ozone fs -ls -R ofs://omservice/` will recursively list all volumes, +buckets and keys the user has LIST permission to, if ACL is enabled. +If ACL is disabled, the command would just list literally everything on that +cluster. + +This feature wouldn't degrade server performance as the loop is on the client. +Think of it as a client issuing multiple requests to the server to get all the +information. diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md index 94e455728f95..3404cb8233ec 100644 --- a/hadoop-hdds/docs/content/interface/S3.md +++ b/hadoop-hdds/docs/content/interface/S3.md @@ -1,6 +1,9 @@ --- title: S3 Protocol weight: 3 +menu: + main: + parent: "Client Interfaces" summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, You can use S3 clients and S3 SDK based applications without any modifications with Ozone. --- @@ -24,7 +27,7 @@ summary: Ozone supports Amazon's Simple Storage Service (S3) protocol.
In fact, Ozone provides S3 compatible REST interface to use the object store data with any S3 compatible tools. -S3 buckets are stored under the `/s3v`(Default is s3v, which can be setted through ozone.s3g.volume.name) volume, which needs to be created by an administrator first. +S3 buckets are stored under the `/s3v` volume. ## Getting started @@ -38,12 +41,6 @@ Go to the `compose/ozone` directory, and start the server: docker-compose up -d --scale datanode=3 ``` -Create the `/s3v` volume: - -```bash -docker-compose exec scm ozone sh volume create /s3v -``` - You can access the S3 gateway at `http://localhost:9878` ## URL Schema @@ -116,6 +113,24 @@ export AWS_SECRET_ACCESS_KEY=c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e aws s3api --endpoint http://localhost:9878 create-bucket --bucket bucket1 ``` +## Expose any volume + +Ozone has one more element in the name-space hierarchy compared to S3: the volumes. By default, all the buckets of the `/s3v` volume can be accessed with S3 interface but only the (Ozone) buckets of the `/s3v` volumes are exposed. + +To make any other buckets available with the S3 interface a "symbolic linked" bucket can be created: + +```bash +ozone sh create volume /s3v +ozone sh create volume /vol1 + +ozone sh create bucket /vol1/bucket1 +ozone sh bucket link /vol1/bucket1 /s3v/common-bucket +``` + +This example expose the `/vol1/bucket1` Ozone bucket as an S3 compatible `common-bucket` via the S3 interface. + +(Note: the implementation details of the bucket-linking feature can be found in the [design doc]({{< ref "design/volume-management.md">}})) + ## Clients ### AWS Cli diff --git a/hadoop-hdds/docs/content/interface/_index.md b/hadoop-hdds/docs/content/interface/_index.md index 254864732fb8..40ca5e7b249b 100644 --- a/hadoop-hdds/docs/content/interface/_index.md +++ b/hadoop-hdds/docs/content/interface/_index.md @@ -1,8 +1,8 @@ --- -title: "Programming Interfaces" +title: "Client Interfaces" menu: main: - weight: 4 + weight: 5 --- + +用户需要由 AWS 网站生成的 AWS access key ID 和 AWS secret 来访问 AWS S3 的桶,当你使用 Ozone 的 S3 协议时,你也需要同样的 AWS access key 和 secret。 + +在 Ozone 中,用户可以直接下载 access key。用户需要先执行 `kinit` 命令进行 Kerberos 认证,认证通过后就可以下载 S3 access key 和 secret。和 AWS S3 一样,access key 和 secret 具有 S3 桶的全部权限,用户需要保管好 key 和 secret。 + +* S3 客户端可以从 OM 获取 access key id 和 secret。 + +```bash +ozone s3 getsecret +``` +这条命令会与 Ozone 进行通信,对用户进行 Kerberos 认证并生成 AWS 凭据,结果会直接打印在屏幕上,你可以将其配置在 _.aws._ 文件中,这样可以在操作 Ozone S3 桶时自动进行认证。 + + + + +* 在 aws 配置中添加上述凭据: + +```bash +aws configure set default.s3.signature_version s3v4 +aws configure set aws_access_key_id ${accessId} +aws configure set aws_secret_access_key ${secret} +aws configure set region us-west-1 +``` +关于通过命令行和 S3 API 使用 S3,请参考 AWS S3 的文档。 diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md index 3e8f2d16819f..ae8f390d3cb6 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.md @@ -1,8 +1,11 @@ --- title: "Transparent Data Encryption" date: "2019-April-03" -summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. You can enable this per key or per bucket. -weight: 3 +summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. 
+weight: 2 +menu: + main: + parent: Security icon: lock --- + +Ozone TDE 的配置和使用和 HDFS TDE 十分相似,主要的区别是,Ozone 中桶级别的 TDE 必须在创建桶时启用。 + +### 配置密钥管理服务器 + +使用 TDE 之前,管理员必须要提前配置密钥管理服务 KMS,并且把 KMS 的 URI 通过 core-site.xml 提供给 Ozone。 + +参数名 | 值 +-----------------------------------|----------------------------------------- +hadoop.security.key.provider.path | KMS uri.
比如 kms://http@kms-host:9600/kms + +### 使用 TDE +如果你的集群已经配置好了 TDE,那么你只需要创建加密密钥并启用桶加密即可。 + +创建加密密钥的方法为: + * 使用 hadoop key 命令创建桶加密密钥,和 HDFS 加密区域的使用方法类似。 + + ```bash + hadoop key create encKey + ``` + 上面这个命令会创建一个用于保护桶数据的密钥。创建完成之后,你可以告诉 Ozone 在读写某个桶中的数据时使用这个密钥。 + + * 将加密密钥分配给桶 + + ```bash + ozone sh bucket create -k encKey /vol/encryptedBucket + ``` + +这条命令执行后,所以写往 _encryptedBucket_ 的数据都会用 encKey 进行加密,当读取里面的数据时,客户端通过 KMS 获取密钥进行解密。换句话说,Ozone 中存储的数据一直是加密的,但用户和客户端对此完全无感知。 diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md index 31bbb0a95cc2..da4b28af8537 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.md @@ -2,6 +2,9 @@ title: "Ozone ACLs" date: "2019-April-03" weight: 6 +menu: + main: + parent: Security summary: Native Ozone Authorizer provides Access Control List (ACL) support for Ozone without Ranger integration. icon: transfer --- @@ -22,9 +25,9 @@ icon: transfer limitations under the License. --> -Ozone supports a set of native ACLs. These ACLs can be used independently or -along with Ranger. If Apache Ranger is enabled, then ACL will be checked -first with Ranger and then Ozone's internal ACLs will be evaluated. +Ozone supports a set of native ACLs. These ACLs can be used independently +of ozone ACL plugin such as Ranger. If Apache Ranger plugin for Ozone is +enabled, then ACL will be checked with Ranger. Ozone ACLs are a super set of Posix and S3 ACLs. @@ -52,7 +55,7 @@ we have no way of knowing who the user is or we don't care. diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md new file mode 100644 index 000000000000..9a3275ba69ea --- /dev/null +++ b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md @@ -0,0 +1,66 @@ +--- +title: "Ozone 访问控制列表" +date: "2019-April-03" +weight: 6 +summary: Ozone 原生的授权模块提供了不需要集成 Ranger 的访问控制列表(ACL)支持。 +icon: transfer +--- + + +Ozone 既支持原生的 ACL,也支持类似 Ranger 这样的 ACL 插件,如果启用了 Ranger 插件,则以 Ranger 中的 ACL 为准。 + +Ozone 的 ACL 是 Posix ACL 和 S3 ACL 的超集。 + +ACL 的通用格式为 _对象_:_角色_:_权限_. + +_对象_ 可选的值包括: + +1. **卷** - 一个 Ozone 卷,比如 _/volume_ +2. **桶** - 一个 Ozone 桶,比如 _/volume/bucket_ +3. **键** - 一个对象键,比如 _/volume/bucket/key_ +4. **前缀** - 某个键的路径前缀,比如 _/volume/bucket/prefix1/prefix2_ + +_角色_ 可选的值包括: + +1. **用户** - 一个 Kerberos 用户,和 Posix 用户一样,用户可以是已创建的也可以是未创建的。 +2. **组** - 一个 Kerberos 组,和 Posix 组一样,组可以是已创建的也可以是未创建的。 +3. **所有人** - 所有通过 Kerberos 认证的用户,这对应 Posix 标准中的其它用户。 +4. **匿名** - 完全忽略用户字段,这是对 Posix 语义的扩展,使用 S3 协议时会用到,用于表达无法获取用户的身份或者不在乎用户的身份。 + + + +_权限_ 可选的值包括:: + +1. **创建** – 此 ACL 为用户赋予在卷中创建桶,或者在桶中创建键的权限。请注意:在 Ozone 中,只有管理员可以创建卷。 +2. **列举** – 此 ACL 允许用户列举桶和键,因为列举的是子对象,所以这种 ACL 要绑定在卷和桶上。请注意:只有卷的属主和管理员可以对卷执行列举操作。 +3. **删除** – 允许用户删除卷、桶或键。 +4. **读取** – 允许用户读取卷和桶的元数据,以及读取键的数据流和元数据。 +5. **写入** - 允许用户修改卷和桶的元数据,以及重写一个已存在的键。 +6. **读 ACL** – 允许用户读取某个对象的 ACL。 +7. **写 ACL** – 允许用户修改某个对象的 ACL。 + +

Ozone 原生 ACL API

+ +ACL 可以通过 Ozone 提供的一系列 API 进行操作,支持的 API 包括: + +1. **SetAcl** – 此 API 的参数为用户主体、Ozone 对象名称、Ozone 对象的类型和 ACL 列表。 +2. **GetAcl** – 此 API 的参数为 Ozone 对象名称和 Ozone 对象类型,返回值为 ACL 列表。 +3. **AddAcl** - 此 API 的参数为 Ozone 对象名称、Ozone 对象类型和待添加的 ACL,新的 ACL 会被添加到该 Ozone 对象的 ACL 条目中。 +4. **RemoveAcl** - 此 API 的参数为 Ozone 对象名称、Ozone 对象类型和待删除的 ACL。 diff --git a/hadoop-hdds/docs/content/security/SecuityWithRanger.md b/hadoop-hdds/docs/content/security/SecurityWithRanger.md similarity index 97% rename from hadoop-hdds/docs/content/security/SecuityWithRanger.md rename to hadoop-hdds/docs/content/security/SecurityWithRanger.md index cbbd53ec7c12..7daaf8199e18 100644 --- a/hadoop-hdds/docs/content/security/SecuityWithRanger.md +++ b/hadoop-hdds/docs/content/security/SecurityWithRanger.md @@ -1,7 +1,10 @@ --- title: "Apache Ranger" date: "2019-April-03" -weight: 5 +weight: 7 +menu: + main: + parent: Security summary: Apache Ranger is a framework to enable, monitor and manage comprehensive data security across the Hadoop platform. icon: user --- diff --git a/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md b/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md new file mode 100644 index 000000000000..b879e9347c08 --- /dev/null +++ b/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md @@ -0,0 +1,35 @@ +--- +title: "Apache Ranger" +date: "2019-April-03" +weight: 5 +summary: Apache Ranger 是一个用于管理和监控 Hadoop 平台复杂数据权限的框架。 +icon: user +--- + + + +Apache Ranger™ 是一个用于管理和监控 Hadoop 平台复杂数据权限的框架。版本大于 1.20 的 Apache Ranger 都可以用于管理 Ozone 集群。 + +你需要先在你的 Hadoop 集群上安装 Apache Ranger,安装指南可以参考 [Apache Ranger 官网](https://ranger.apache.org/index.html). + +如果你已经安装好了 Apache Ranger,那么 Ozone 的配置十分简单,你只需要启用 ACL 支持并且将 ACL 授权类设置为 Ranger 授权类,在 ozone-site.xml 中添加下面的参数: + +参数名|参数值 +--------|------------------------------------------------------------ +ozone.acl.enabled | true +ozone.acl.authorizer.class| org.apache.ranger.authorization.ozone.authorizer.RangerOzoneAuthorizer diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.md b/hadoop-hdds/docs/content/shell/BucketCommands.md deleted file mode 100644 index fa63ad78857a..000000000000 --- a/hadoop-hdds/docs/content/shell/BucketCommands.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Bucket Commands -summary: Bucket commands help you to manage the life cycle of a volume. -weight: 3 ---- - - -Ozone shell supports the following bucket commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - -### Create - -The `bucket create` command allows users to create a bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -g, \-\-enforcegdpr | Optional, if set to true it creates a GDPR compliant bucket, if not specified or set to false, it creates an ordinary bucket. -| -k, \-\-bucketKey | Optional, if a bucket encryption key name from the configured KMS server is specified, the files in the bucket will be transparently encrypted. Instruction on KMS configuration can be found from Hadoop KMS document. -| Uri | The name of the bucket in **/volume/bucket** format. - - -{{< highlight bash >}} -ozone sh bucket create /hive/jan -{{< /highlight >}} - -The above command will create a bucket called _jan_ in the _hive_ volume. -Since no scheme was specified this command defaults to O3 (RPC) protocol. - -### Delete - -The `bucket delete` command allows users to delete a bucket. If the -bucket is not empty then this command will fail. 
- -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket - -{{< highlight bash >}} -ozone sh bucket delete /hive/jan -{{< /highlight >}} - -The above command will delete _jan_ bucket if it is empty. - -### Info - -The `bucket info` commands returns the information about the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket. - -{{< highlight bash >}} -ozone sh bucket info /hive/jan -{{< /highlight >}} - -The above command will print out the information about _jan_ bucket. - -### List - -The `bucket list` command allows users to list the buckets in a volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | Maximum number of results to return. Default: 100 -| -p, \-\-prefix | Optional, Only buckets that match this prefix will be returned. -| -s, \-\-start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh bucket list /hive -{{< /highlight >}} - -This command will list all buckets on the volume _hive_. diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.zh.md b/hadoop-hdds/docs/content/shell/BucketCommands.zh.md deleted file mode 100644 index 9afd28079c20..000000000000 --- a/hadoop-hdds/docs/content/shell/BucketCommands.zh.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: 桶命令 -summary: 用桶命令管理桶的生命周期 -weight: 3 ---- - - -Ozone shell 提供以下桶命令: - - * [创建](#创建) - * [删除](#删除) - * [查看](#查看) - * [列举](#列举) - -### 创建 - -用户使用 `bucket create` 命令来创建桶。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| -g, \-\-enforcegdpr | 可选,如果设置为 true 则创建符合 GDPR 规范的桶,设置为 false 或不指定则创建普通的桶| -| -k, \-\-bucketKey | 可选,如果指定了 KMS 服务器中的桶加密密钥名,该桶中的文件都会被自动加密,KMS 的配置说明可以参考 Hadoop KMS 文档。 -| Uri | 桶名,格式为 **/volume/bucket** | - - -{{< highlight bash >}} -ozone sh bucket create /hive/jan -{{< /highlight >}} - -上述命令会在 _hive_ 卷中创建一个名为 _jan_ 的桶,因为没有指定 scheme,默认使用 O3(RPC)协议。 - -### 删除 - -用户使用 `bucket delete` 命令来删除桶,如果桶不为空,此命令将失败。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 桶名 | - -{{< highlight bash >}} -ozone sh bucket delete /hive/jan -{{< /highlight >}} - -如果 _jan_ 桶不为空,上述命令会将其删除。 - -### 查看 - -`bucket info` 命令返回桶的信息。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 桶名 | - -{{< highlight bash >}} -ozone sh bucket info /hive/jan -{{< /highlight >}} - -上述命令会打印出 _jan_ 桶的有关信息。 - -### 列举 - -用户通过 `bucket list` 命令列举一个卷下的所有桶。 - -***参数:*** - -| 参数 | 说明 | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | 返回结果的最大数量,默认为 100 -| -p, \-\-prefix | 可选,只有匹配指定前缀的桶会被返回 -| -s, \-\-start | 从指定键开始列举 -| Uri | 卷名 - -{{< highlight bash >}} -ozone sh bucket list /hive -{{< /highlight >}} - -此命令会列出 _hive_ 卷中的所有桶。 diff --git a/hadoop-hdds/docs/content/shell/Format.md b/hadoop-hdds/docs/content/shell/Format.md deleted file mode 100644 index d6c9d2f51802..000000000000 --- a/hadoop-hdds/docs/content/shell/Format.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Shell Overview -summary: Explains the command syntax used by shell command. -weight: 1 ---- - - -Ozone shell help can be invoked at _object_ level or at _action_ level. 
-For example: - -{{< highlight bash >}} -ozone sh volume --help -{{< /highlight >}} - -This will show all possible actions for volumes. - -or it can be invoked to explain a specific action like -{{< highlight bash >}} -ozone sh volume create --help -{{< /highlight >}} -This command will give you command line options of the create command. - -

- - -### General Command Format - -The Ozone shell commands take the following format. - -> _ozone sh object action url_ - -**ozone** script is used to invoke all Ozone sub-commands. The ozone shell is -invoked via ```sh``` command. - -The object can be a volume, bucket or a key. The action is various verbs like -create, list, delete etc. - - -Ozone URL can point to a volume, bucket or keys in the following format: - -_\[schema\]\[server:port\]/volume/bucket/key_ - - -Where, - -1. **Schema** - This should be `o3` which is the native RPC protocol to access - Ozone API. The usage of the schema is optional. - -2. **Server:Port** - This is the address of the Ozone Manager. If the port is -omitted the default port from ozone-site.xml will be used. - -Depending on the call, the volume/bucket/key names will be part of the URL. -Please see volume commands, bucket commands, and key commands section for more -detail. diff --git a/hadoop-hdds/docs/content/shell/Format.zh.md b/hadoop-hdds/docs/content/shell/Format.zh.md deleted file mode 100644 index edfcbdc24a49..000000000000 --- a/hadoop-hdds/docs/content/shell/Format.zh.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Shell 概述 -summary: shell 命令的语法介绍。 -weight: 1 ---- - - -Ozone shell 的帮助命令既可以在 _对象_ 级别调用,也可以在 _操作_ 级别调用。 -比如: - -{{< highlight bash >}} -ozone sh volume --help -{{< /highlight >}} - -此命令会列出所有对卷的可能操作。 - -你也可以通过它查看特定操作的帮助,比如: - -{{< highlight bash >}} -ozone sh volume create --help -{{< /highlight >}} - -这条命令会给出 create 命令的命令行选项。 - -

- - -### 通用命令格式 - -Ozone shell 命令都遵照以下格式: - -> _ozone sh object action url_ - -**ozone** 脚本用来调用所有 Ozone 子命令,ozone shell 通过 ```sh``` 子命令调用。 - -对象可以是卷、桶或键,操作一般是各种动词,比如 create、list、delete 等等。 - - -Ozone URL 可以指向卷、桶或键,格式如下: - -_\[schema\]\[server:port\]/volume/bucket/key_ - - -其中, - -1. **Schema** - 可选,默认为 `o3`,表示使用原生 RPC 协议来访问 Ozone API。 - -2. **Server:Port** - OM 的地址,如果省略了端口, 则使用 ozone-site.xml 中的默认端口。 - -根据具体的命令不同,卷名、桶名和键名将用来构成 URL,卷、桶和键命令的文档有更多具体的说明。 diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.md b/hadoop-hdds/docs/content/shell/KeyCommands.md deleted file mode 100644 index 11186c422184..000000000000 --- a/hadoop-hdds/docs/content/shell/KeyCommands.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Key Commands -summary: Key commands help you to manage the life cycle of - Keys / Objects. -weight: 4 ---- - - - -Ozone shell supports the following key commands. - - * [get](#get) - * [put](#put) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [rename](#rename) - * [cat](#cat) - * [copy](#cp) - - -### Get - -The `key get` command downloads a key from Ozone cluster to local file system. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to download the key to. - - -{{< highlight bash >}} -ozone sh key get /hive/jan/sales.orc sales.orc -{{< /highlight >}} -Downloads the file sales.orc from the _/hive/jan_ bucket and writes to the -local file sales.orc. - -### Put - -The `key put` command uploads a file from the local file system to the specified bucket. - -***Params:*** - - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to upload. -| -r, \-\-replication | Optional, Number of copies, ONE or THREE are the options. Picks up the default from cluster configuration. -| -t, \-\-type | Optional, replication type of the new key. RATIS and STAND_ALONE are the options. Picks up the default from cluster configuration. - -{{< highlight bash >}} -ozone sh key put /hive/jan/corrected-sales.orc sales.orc -{{< /highlight >}} -The above command will put the sales.orc as a new key into _/hive/jan/corrected-sales.orc_. - -### Delete - -The `key delete` command removes the key from the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key delete /hive/jan/corrected-sales.orc -{{< /highlight >}} - -The above command deletes the key _/hive/jan/corrected-sales.orc_. - - -### Info - -The `key info` commands returns the information about the key. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key info /hive/jan/sales.orc -{{< /highlight >}} - -The above command will print out the information about _/hive/jan/sales.orc_ -key. - -### List - -The `key list` command allows user to list all keys in a bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | Maximum number of results to return. Default: 100 -| -p, \-\-prefix | Optional, Only keys that match this prefix will be returned. 
-| -s, \-\-start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh key list /hive/jan -{{< /highlight >}} - -This command will list all keys in the bucket _/hive/jan_. - -### Rename - -The `key rename` command changes the name of an existing key in the specified bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket in **/volume/bucket** format. -| FromKey | The existing key to be renamed -| ToKey | The new desired name of the key - -{{< highlight bash >}} -ozone sh key rename /hive/jan sales.orc new_name.orc -{{< /highlight >}} -The above command will rename _sales.orc_ to _new\_name.orc_ in the bucket _/hive/jan_. - -### Cat - -The `key cat` command displays the contents of a specific Ozone key to standard output. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. - - -{{< highlight bash >}} -ozone sh key cat /hive/jan/hello.txt -{{< /highlight >}} -Displays the contents of the key hello.txt from the _/hive/jan_ bucket to standard output. - -### Cp - -The `key cp` command copies a key to another one in the specified bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket in **/volume/bucket** format. -| FromKey | The existing key to be copied -| ToKey | The name of the new key -| -r, \-\-replication | Optional, Number of copies, ONE or THREE are the options. Picks up the default from cluster configuration. -| -t, \-\-type | Optional, replication type of the new key. RATIS and STAND_ALONE are the options. Picks up the default from cluster configuration. - -{{< highlight bash >}} -ozone sh key cp /hive/jan sales.orc new_one.orc -{{< /highlight >}} -The above command will copy _sales.orc_ to _new\_one.orc_ in the bucket _/hive/jan_. 
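All of the examples above use the short URL form. Combined with the URL format described in the shell overview, a key command can also name the Ozone Manager explicitly; the host and port below are placeholders for your own OM address, not values taken from this page.

{{< highlight bash >}}
# Same download as the `key get` example above, but with an explicit o3 schema
# and Ozone Manager address (om.example.com:9862 is only a placeholder).
ozone sh key get o3://om.example.com:9862/hive/jan/sales.orc sales.orc
{{< /highlight >}}

When the schema and address are omitted, the shell falls back to the defaults from the cluster configuration, as described in the shell overview.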
\ No newline at end of file diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.zh.md b/hadoop-hdds/docs/content/shell/KeyCommands.zh.md deleted file mode 100644 index 2a36e7324f31..000000000000 --- a/hadoop-hdds/docs/content/shell/KeyCommands.zh.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: 键命令 -summary: 用键命令管理键/对象的生命周期 -weight: 4 ---- - - - -Ozone shell 提供以下键命令: - - * [下载](#下载) - * [上传](#上传) - * [删除](#删除) - * [查看](#查看) - * [列举](#列举) - * [重命名](#重命名) - * [Cat](#cat) - * [Cp](#cp) - - -### 下载 - -`key get` 命令从 Ozone 集群下载一个键到本地文件系统。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名,格式为 **/volume/bucket/key** -| FileName | 下载到本地后的文件名 - - -{{< highlight bash >}} -ozone sh key get /hive/jan/sales.orc sales.orc -{{< /highlight >}} - -从 _/hive/jan_ 桶中下载 sales.orc 文件,写入到本地名为 sales.orc 的文件。 - -### 上传 - -`key put` 命令从本地文件系统上传一个文件到指定的桶。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名,格式为 **/volume/bucket/key** -| FileName | 待上传的本地文件 -| -r, \-\-replication | 可选,上传后的副本数,合法值为 ONE 或者 THREE,如果不设置,将采用集群配置中的默认值。 -| -t, \-\-type | 可选,副本类型,合法值为 RATIS 或 STAND_ALONE,如果不设置,将采用集群配置中的默认值。 - -{{< highlight bash >}} -ozone sh key put /hive/jan/corrected-sales.orc sales.orc -{{< /highlight >}} - -上述命令将 sales.orc 文件作为新键上传到 _/hive/jan/corrected-sales.orc_ 。 - -### 删除 - -`key delete` 命令用来从桶中删除指定键。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名 - -{{< highlight bash >}} -ozone sh key delete /hive/jan/corrected-sales.orc -{{< /highlight >}} - -上述命令会将 _/hive/jan/corrected-sales.orc_ 这个键删除。 - - -### 查看 - -`key info` 命令返回指定键的信息。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名 - -{{< highlight bash >}} -ozone sh key info /hive/jan/sales.orc -{{< /highlight >}} - -上述命令会打印出 _/hive/jan/sales.orc_ 键的相关信息。 - -### 列举 - -用户通过 `key list` 命令列出一个桶中的所有键。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | 返回结果的最大数量,默认值为 100 -| -p, \-\-prefix | 可选,只有匹配指定前缀的键会被返回 -| -s, \-\-start | 从指定键开始列举 -| Uri | 桶名 - -{{< highlight bash >}} -ozone sh key list /hive/jan -{{< /highlight >}} - -此命令会列出 _/hive/jan_ 桶中的所有键。 - -### 重命名 - -`key rename` 命令用来修改指定桶中的已有键的键名。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 桶名,格式为 **/volume/bucket** -| FromKey | 旧的键名 -| ToKey | 新的键名 - -{{< highlight bash >}} -ozone sh key rename /hive/jan sales.orc new_name.orc -{{< /highlight >}} - -上述命令会将 _/hive/jan_ 桶中的 _sales.orc_ 重命名为 _new\_name.orc_ 。 - -### Cat - -`key cat` 命令用来把指定的键的内容输出到终端。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 键名,格式为 **/volume/bucket/key** - - -{{< highlight bash >}} -ozone sh key cat /hive/jan/hello.txt -{{< /highlight >}} -上述命令会将 _/hive/jan_ 桶中的 hello.txt 的内容输出到标准输出中来。 - -### Cp - -`key cp` 命令用来在同一个bucket下,从一个key复制出另一个key。 - -***Params:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 桶名 格式为**/volume/bucket**。 -| FromKey | 现有的键名 -| ToKey | 新的键名 -| -r, \-\-replication | 可选,上传后的副本数,合法值为 ONE 或者 THREE,如果不设置,将采用集群配置中的默认值。 -| -t, \-\-type | 可选,副本类型,合法值为 RATIS 或 STAND_ALONE,如果不设置,将采用集群配置中的默认值。 - -{{< highlight bash >}} -ozone sh key cp /hive/jan sales.orc new_one.orc -{{< /highlight >}} -上述命令会将 
_/hive/jan_ 桶中的 _sales.orc_ 复制到 _new\_one.orc_ 。 \ No newline at end of file diff --git a/hadoop-hdds/docs/content/shell/VolumeCommands.md b/hadoop-hdds/docs/content/shell/VolumeCommands.md deleted file mode 100644 index fe459f313352..000000000000 --- a/hadoop-hdds/docs/content/shell/VolumeCommands.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Volume Commands -weight: 2 -summary: Volume commands help you to manage the life cycle of a volume. ---- - - -Volume commands generally need administrator privileges. The ozone shell supports the following volume commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [update](#update) - -### Create - -The `volume create` command allows an administrator to create a volume and -assign it to a user. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | Optional, This argument that specifies the maximum size this volume can use in the Ozone cluster. | -| -u, \-\-user | Required, The name of the user who owns this volume. This user can create, buckets and keys on this volume. | -| Uri | The name of the volume. | - -{{< highlight bash >}} -ozone sh volume create --quota=1TB --user=bilbo /hive -{{< /highlight >}} - -The above command will create a volume called _hive_ on the ozone cluster. This -volume has a quota of 1TB, and the owner is _bilbo_. - -### Delete - -The `volume delete` command allows an administrator to delete a volume. If the -volume is not empty then this command will fail. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the volume. - -{{< highlight bash >}} -ozone sh volume delete /hive -{{< /highlight >}} - -The above command will delete the volume hive, if the volume has no buckets -inside it. - -### Info - -The `volume info` commands returns the information about the volume including -quota and owner information. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the volume. - -{{< highlight bash >}} -ozone sh volume info /hive -{{< /highlight >}} - -The above command will print out the information about hive volume. - -### List - -The `volume list` command will list the volumes accessible by a user. - -{{< highlight bash >}} -ozone sh volume list --user hadoop -{{< /highlight >}} - -When ACL is enabled, the above command will print out volumes that the user -hadoop has LIST permission to. When ACL is disabled, the above command will -print out all the volumes owned by the user hadoop. - -### Update - -The volume update command allows changing of owner and quota on a given volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | Optional, This argument that specifies the maximum size this volume can use in the Ozone cluster. | -| -u, \-\-user | Optional, The name of the user who owns this volume. This user can create, buckets and keys on this volume. | -| Uri | The name of the volume. | - -{{< highlight bash >}} -ozone sh volume update --quota=10TB /hive -{{< /highlight >}} - -The above command updates the volume quota to 10TB. 
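The parameter table for `volume update` also allows changing the owner, although only the quota example is shown above. A minimal sketch of an ownership change, reusing the user name from the create example:

{{< highlight bash >}}
# Reassign the volume to another owner; the user name is simply the one
# from the earlier create example, not a requirement.
ozone sh volume update --user=bilbo /hive
{{< /highlight >}}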
diff --git a/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md b/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md deleted file mode 100644 index 190e0994e74c..000000000000 --- a/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: 卷命令 -weight: 2 -summary: 用卷命令管理卷的生命周期 ---- - - -卷命令通常需要管理员权限,ozone shell 支持以下卷命令: - - * [创建](#创建) - * [删除](#删除) - * [查看](#查看) - * [列举](#列举) - * [更新](#更新) - -### 创建 - -管理员可以通过 `volume create` 命令创建一个卷并分配给一个用户。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | 可选,指明该卷在 Ozone 集群所能使用的最大空间,即限额。 | -| -u, \-\-user | 必需,指明该卷的所有者,此用户可以在该卷中创建桶和键。 | -| Uri | 卷名 | - -{{< highlight bash >}} -ozone sh volume create --quota=1TB --user=bilbo /hive -{{< /highlight >}} - -上述命令会在 ozone 集群中创建名为 _hive_ 的卷,卷的限额为 1TB,所有者为 _bilbo_ 。 - -### 删除 - -管理员可以通过 `volume delete` 命令删除一个卷,如果卷不为空,此命令将失败。 - -***参数*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 卷名 | - -{{< highlight bash >}} -ozone sh volume delete /hive -{{< /highlight >}} - -如果 hive 卷中不包含任何桶,上述命令将删除 hive 卷。 - -### 查看 - -通过 `volume info` 命令可以获取卷的限额和所有者信息。 - -***参数:*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| Uri | 卷名 | - -{{< highlight bash >}} -ozone sh volume info /hive -{{< /highlight >}} - -上述命令会打印出 hive 卷的相关信息。 - -### 列举 - -`volume list` 命令用来列举一个用户可以访问的所有卷。 - -{{< highlight bash >}} -ozone sh volume list --user hadoop -{{< /highlight >}} - -若 ACL 已启用,上述命令会打印出 hadoop 用户有 LIST 权限的所有卷。 -若 ACL 被禁用,上述命令会打印出 hadoop 用户拥有的所有卷。 - -### 更新 - -`volume update` 命令用来修改卷的所有者和限额。 - -***参数*** - -| 参数名 | 说明 | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | 可选,重新指定该卷在 Ozone 集群中的限额。 | -| -u, \-\-user | 可选,重新指定该卷的所有者 | -| Uri | 卷名 | - -{{< highlight bash >}} -ozone sh volume update --quota=10TB /hive -{{< /highlight >}} - -上述命令将 hive 卷的限额更新为 10TB。 diff --git a/hadoop-hdds/docs/content/shell/_index.zh.md b/hadoop-hdds/docs/content/shell/_index.zh.md deleted file mode 100644 index 0f6220b5f0e6..000000000000 --- a/hadoop-hdds/docs/content/shell/_index.zh.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 命令行接口 -menu: - main: - weight: 3 ---- - - - -{{}} - Ozone shell 是用户与 Ozone 进行交互的主要接口,它提供了操作 Ozone 的命令行接口。 -{{}} diff --git a/hadoop-hdds/docs/content/start/FromSource.md b/hadoop-hdds/docs/content/start/FromSource.md index 9ce0cc4b6a8f..80f47fb78f0b 100644 --- a/hadoop-hdds/docs/content/start/FromSource.md +++ b/hadoop-hdds/docs/content/start/FromSource.md @@ -22,18 +22,21 @@ weight: 30 {{< requirements >}} * Java 1.8 * Maven - * Protoc (2.5) {{< /requirements >}} - +planning to build sources yourself, you can safely skip this page. + + If you are a Hadoop ninja, and wise in the ways of Apache, you already know that a real Apache release is a source release. -If you want to build from sources, Please untar the source tarball and run -the ozone build command. This instruction assumes that you have all the +If you want to build from sources, Please untar the source tarball (or clone the latest code +from the [git repository](https://github.com/apache/hadoop-ozone)) and run the ozone build command. This instruction assumes that you have all the dependencies to build Hadoop on your build machine. If you need instructions on how to build Hadoop, please look at the Apache Hadoop Website. @@ -41,28 +44,27 @@ on how to build Hadoop, please look at the Apache Hadoop Website. 
mvn clean package -DskipTests=true ``` -This will build an ozone-\.tar.gz in your `hadoop-ozone/dist/target` directory. +This will build an `ozone-\` directory in your `hadoop-ozone/dist/target` directory. You can copy this tarball and use this instead of binary artifacts that are provided along with the official release. -## How to test the build - -You can run the acceptance tests in the hadoop-ozone directory to make sure -that your build is functional. To launch the acceptance tests, please follow - the instructions in the **README.md** in the `smoketest` directory. +To create a tar file distribution, use the `-Pdist` profile: ```bash -cd smoketest -./test.sh +mvn clean package -DskipTests=true -Pdist ``` - You can also execute only a minimal subset of the tests: +## How to run Ozone from build + +When you have the new distribution, you can start a local cluster [with docker-compose]({{< ref "start/RunningViaDocker.md">}}). ```bash -cd smoketest -./test.sh --env ozone basic +cd hadoop-ozone/dist/target/ozone-X.X.X... +cd compose/ozone +docker-compose up -d ``` -Acceptance tests will start a small ozone cluster and verify that ozone shell and ozone file - system is fully functional. +## How to test the build + +The `compose` subfolder contains multiple types of example setups (secure, non-secure, HA, Yarn). They can be tested with the help of [robotframework](http://robotframework.org/) by executing `test.sh` in any of the directories (see the sketch below for one example invocation). \ No newline at end of file diff --git a/hadoop-hdds/docs/content/start/FromSource.zh.md b/hadoop-hdds/docs/content/start/FromSource.zh.md index a1b9f372e5e8..ab740af73828 100644 --- a/hadoop-hdds/docs/content/start/FromSource.zh.md +++ b/hadoop-hdds/docs/content/start/FromSource.zh.md @@ -19,10 +19,15 @@ weight: 30 limitations under the License. --> +
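The "How to test the build" section above leaves the exact invocation to the reader. A minimal sketch, assuming the build has already produced the `compose` directory inside the dist output (the version part of the path is a placeholder, exactly as in the docker-compose example):

```bash
# Run the robotframework-based acceptance tests for one example setup;
# replace the version placeholder with the directory your build produced.
cd hadoop-ozone/dist/target/ozone-X.X.X.../compose/ozone
./test.sh
```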
+ +注意:本页面翻译的信息可能滞后,最新的信息请参看英文版的相关页面。 + +
+ {{< requirements >}} * Java 1.8 * Maven - * Protoc (2.5) {{< /requirements >}} - + diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 38ce638f4d11..4e900bbe3785 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -14,6 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. --> +

SCM Information

+ + + + + + + + + + + +
Scm Id:{{$ctrl.overview.jmx.ScmId}}
Cluster Id:{{$ctrl.overview.jmx.ClusterId}}
+

Node counts

@@ -38,7 +52,18 @@

Status

- +
Node Manager: Safe mode status{{$ctrl.scmmetrics.InSafeMode}}{{$ctrl.overview.jmx.InSafeMode}}
+ +

Safe mode rule statuses

+ + + + + + + + +
{{typestat.key}}{{typestat.value}}
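The Angular bindings added above are filled from the SCM web UI's JMX servlet, the same endpoint the accompanying `scm.js` queries. For a quick look at the underlying values outside the browser, something along these lines should work; the host and port are placeholders for your SCM HTTP address, and the exact bean layout is not specified in this change.

```bash
# Dump the SCM JMX beans and pick out the fields the overview page binds to
# (scm.example.com:9876 is only a placeholder for the SCM HTTP endpoint).
curl -s 'http://scm.example.com:9876/jmx' | grep -E 'ScmId|ClusterId|InSafeMode'
```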
\ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js index 2942a561dea3..b5acc19fa386 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js @@ -30,10 +30,6 @@ .then(function (result) { ctrl.nodemanagermetrics = result.data.beans[0]; }); - $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo,component=ServerRuntime") - .then(function (result) { - ctrl.scmmetrics = result.data.beans[0]; - }); var statusSortOrder = { "HEALTHY": "a", diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index 64752dab5543..f4f17598ed0d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -222,20 +222,26 @@ public static StorageReportProto getRandomStorageReport(UUID nodeId, StorageTypeProto.DISK); } - /** - * Creates storage report with the given information. - * - * @param nodeId datanode id - * @param path storage dir - * @param capacity storage size - * @param used space used - * @param remaining space remaining - * @param type type of storage - * - * @return StorageReportProto - */ public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity, long used, long remaining, StorageTypeProto type) { + long capacity, long used, long remaining, StorageTypeProto type) { + return createStorageReport(nodeId, path, capacity, used, remaining, + type, false); + } + /** + * Creates storage report with the given information. + * + * @param nodeId datanode id + * @param path storage dir + * @param capacity storage size + * @param used space used + * @param remaining space remaining + * @param type type of storage + * + * @return StorageReportProto + */ + public static StorageReportProto createStorageReport(UUID nodeId, String path, + long capacity, long used, long remaining, StorageTypeProto type, + boolean failed) { Preconditions.checkNotNull(nodeId); Preconditions.checkNotNull(path); StorageReportProto.Builder srb = StorageReportProto.newBuilder(); @@ -243,6 +249,7 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path, .setStorageLocation(path) .setCapacity(capacity) .setScmUsed(used) + .setFailed(failed) .setRemaining(remaining); StorageTypeProto storageTypeProto = type == null ? 
StorageTypeProto.DISK : type; @@ -547,4 +554,17 @@ public static ContainerReplica getReplicas( .build(); } + public static Pipeline getRandomPipeline() { + List nodes = new ArrayList<>(); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + return Pipeline.newBuilder() + .setFactor(HddsProtos.ReplicationFactor.THREE) + .setId(PipelineID.randomId()) + .setNodes(nodes) + .setState(Pipeline.PipelineState.OPEN) + .setType(HddsProtos.ReplicationType.RATIS) + .build(); + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index e0ba53c7e94c..a72031c42496 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -298,6 +298,72 @@ public void testBlockDistribution() throws Exception { } } + + @Test + public void testBlockDistributionWithMultipleDisks() throws Exception { + int threadCount = numContainerPerOwnerInPipeline * + numContainerPerOwnerInPipeline; + nodeManager.setNumHealthyVolumes(numContainerPerOwnerInPipeline); + List executors = new ArrayList<>(threadCount); + for (int i = 0; i < threadCount; i++) { + executors.add(Executors.newSingleThreadExecutor()); + } + pipelineManager.createPipeline(type, factor); + TestUtils.openAllRatisPipelines(pipelineManager); + Map> allocatedBlockMap = + new ConcurrentHashMap<>(); + List> futureList = + new ArrayList<>(threadCount); + for (int i = 0; i < threadCount; i++) { + final CompletableFuture future = + new CompletableFuture<>(); + CompletableFuture.supplyAsync(() -> { + try { + List blockList; + AllocatedBlock block = blockManager + .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, + OzoneConsts.OZONE, + new ExcludeList()); + long containerId = block.getBlockID().getContainerID(); + if (!allocatedBlockMap.containsKey(containerId)) { + blockList = new ArrayList<>(); + } else { + blockList = allocatedBlockMap.get(containerId); + } + blockList.add(block); + allocatedBlockMap.put(containerId, blockList); + future.complete(block); + } catch (IOException e) { + future.completeExceptionally(e); + } + return future; + }, executors.get(i)); + futureList.add(future); + } + try { + CompletableFuture + .allOf(futureList.toArray( + new CompletableFuture[futureList.size()])).get(); + Assert.assertTrue( + pipelineManager.getPipelines(type).size() == 1); + Pipeline pipeline = pipelineManager.getPipelines(type).get(0); + // total no of containers to be created will be number of healthy + // volumes * number of numContainerPerOwnerInPipeline which is equal to + // the thread count + Assert.assertTrue(threadCount == pipelineManager. + getNumberOfContainers(pipeline.getId())); + Assert.assertTrue( + allocatedBlockMap.size() == threadCount); + Assert.assertTrue(allocatedBlockMap. 
+ values().size() == threadCount); + allocatedBlockMap.values().stream().forEach(v -> { + Assert.assertTrue(v.size() == 1); + }); + } catch (Exception e) { + Assert.fail("testAllocateBlockInParallel failed"); + } + } + @Test public void testAllocateOversizedBlock() throws Exception { long size = 6 * GB; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 54f6ee43334c..4b8b37dee273 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -92,6 +92,7 @@ public class MockNodeManager implements NodeManager { private final Node2ContainerMap node2ContainerMap; private NetworkTopology clusterMap; private ConcurrentMap> dnsToUuidMap; + private int numHealthyDisksPerDatanode; public MockNodeManager(NetworkTopologyImpl clusterMap, List nodes, @@ -121,6 +122,7 @@ public MockNodeManager(NetworkTopologyImpl clusterMap, } safemode = false; this.commandMap = new HashMap<>(); + numHealthyDisksPerDatanode = 1; } public MockNodeManager(boolean initializeFakeNodes, int nodeCount) { @@ -388,6 +390,19 @@ public void clearCommandQueue(UUID dnId) { } } + public void setNodeState(DatanodeDetails dn, HddsProtos.NodeState state) { + healthyNodes.remove(dn); + staleNodes.remove(dn); + deadNodes.remove(dn); + if (state == HEALTHY) { + healthyNodes.add(dn); + } else if (state == STALE) { + staleNodes.add(dn); + } else { + deadNodes.add(dn); + } + } + /** * Closes this stream and releases any system resources associated with it. If * the stream is already closed then invoking this method has no effect. @@ -569,6 +584,15 @@ public void setNetworkTopology(NetworkTopology topology) { this.clusterMap = topology; } + @Override + public int getNumHealthyVolumes(List dnList) { + return numHealthyDisksPerDatanode; + } + + public void setNumHealthyVolumes(int value) { + numHealthyDisksPerDatanode = value; + } + /** * A class to declare some values for the nodes so that our tests * won't fail. 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index c7ec835e55b9..9f308fa9738e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -39,11 +39,13 @@ import org.mockito.Mockito; import java.io.IOException; +import java.util.HashSet; import java.util.Iterator; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; +import static junit.framework.TestCase.assertEquals; import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; @@ -483,9 +485,167 @@ public void testQuasiClosedToClosed() Assert.assertEquals(LifeCycleState.CLOSED, containerOne.getState()); } + @Test + public void openContainerKeyAndBytesUsedUpdatedToMinimumOfAllReplicas() + throws SCMException { + final ContainerReportHandler reportHandler = new ContainerReportHandler( + nodeManager, containerManager); + final Iterator nodeIterator = nodeManager.getNodes( + NodeState.HEALTHY).iterator(); + + final DatanodeDetails datanodeOne = nodeIterator.next(); + final DatanodeDetails datanodeTwo = nodeIterator.next(); + final DatanodeDetails datanodeThree = nodeIterator.next(); + + final ContainerReplicaProto.State replicaState + = ContainerReplicaProto.State.OPEN; + final ContainerInfo containerOne = getContainer(LifeCycleState.OPEN); + + final Set containerIDSet = new HashSet<>(); + containerIDSet.add(containerOne.containerID()); + + containerStateManager.loadContainer(containerOne); + // Container loaded, no replicas reported from DNs. Expect zeros for + // usage values. + assertEquals(0L, containerOne.getUsedBytes()); + assertEquals(0L, containerOne.getNumberOfKeys()); + + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 50L, 60L), publisher); + + // Single replica reported - ensure values are updated + assertEquals(50L, containerOne.getUsedBytes()); + assertEquals(60L, containerOne.getNumberOfKeys()); + + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeTwo, 50L, 60L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 50L, 60L), publisher); + + // All 3 DNs are reporting the same values. Counts should be as expected. + assertEquals(50L, containerOne.getUsedBytes()); + assertEquals(60L, containerOne.getNumberOfKeys()); + + // Now each DN reports a different lesser value. Counts should be the min + // reported. + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 1L, 10L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeTwo, 2L, 11L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 3L, 12L), publisher); + + // All 3 DNs are reporting different values. The actual value should be the + // minimum. 
+ assertEquals(1L, containerOne.getUsedBytes()); + assertEquals(10L, containerOne.getNumberOfKeys()); + + // Have the lowest value report a higher value and ensure the new value + // is the minimum + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 3L, 12L), publisher); + + assertEquals(2L, containerOne.getUsedBytes()); + assertEquals(11L, containerOne.getNumberOfKeys()); + } + + @Test + public void notOpenContainerKeyAndBytesUsedUpdatedToMaximumOfAllReplicas() + throws SCMException { + final ContainerReportHandler reportHandler = new ContainerReportHandler( + nodeManager, containerManager); + final Iterator nodeIterator = nodeManager.getNodes( + NodeState.HEALTHY).iterator(); + + final DatanodeDetails datanodeOne = nodeIterator.next(); + final DatanodeDetails datanodeTwo = nodeIterator.next(); + final DatanodeDetails datanodeThree = nodeIterator.next(); + + final ContainerReplicaProto.State replicaState + = ContainerReplicaProto.State.CLOSED; + final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSED); + + final Set containerIDSet = new HashSet<>(); + containerIDSet.add(containerOne.containerID()); + + containerStateManager.loadContainer(containerOne); + // Container loaded, no replicas reported from DNs. Expect zeros for + // usage values. + assertEquals(0L, containerOne.getUsedBytes()); + assertEquals(0L, containerOne.getNumberOfKeys()); + + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 50L, 60L), publisher); + + // Single replica reported - ensure values are updated + assertEquals(50L, containerOne.getUsedBytes()); + assertEquals(60L, containerOne.getNumberOfKeys()); + + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeTwo, 50L, 60L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 50L, 60L), publisher); + + // All 3 DNs are reporting the same values. Counts should be as expected. + assertEquals(50L, containerOne.getUsedBytes()); + assertEquals(60L, containerOne.getNumberOfKeys()); + + // Now each DN reports a different lesser value. Counts should be the max + // reported. + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeOne, 1L, 10L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeTwo, 2L, 11L), publisher); + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 3L, 12L), publisher); + + // All 3 DNs are reporting different values. The actual value should be the + // maximum. 
+ assertEquals(3L, containerOne.getUsedBytes()); + assertEquals(12L, containerOne.getNumberOfKeys()); + + // Have the highest value report a lower value and ensure the new value + // is the new maximumu + reportHandler.onMessage(getContainerReportFromDatanode( + containerOne.containerID(), replicaState, + datanodeThree, 1L, 10L), publisher); + + assertEquals(2L, containerOne.getUsedBytes()); + assertEquals(11L, containerOne.getNumberOfKeys()); + } + + private ContainerReportFromDatanode getContainerReportFromDatanode( + ContainerID containerId, ContainerReplicaProto.State state, + DatanodeDetails dn, long bytesUsed, long keyCount) { + ContainerReportsProto containerReport = getContainerReportsProto( + containerId, state, dn.getUuidString(), bytesUsed, keyCount); + + return new ContainerReportFromDatanode(dn, containerReport); + } + private static ContainerReportsProto getContainerReportsProto( final ContainerID containerId, final ContainerReplicaProto.State state, final String originNodeId) { + return getContainerReportsProto(containerId, state, originNodeId, + 2000000000L, 100000000L); + } + + private static ContainerReportsProto getContainerReportsProto( + final ContainerID containerId, final ContainerReplicaProto.State state, + final String originNodeId, final long usedBytes, final long keyCount) { final ContainerReportsProto.Builder crBuilder = ContainerReportsProto.newBuilder(); final ContainerReplicaProto replicaProto = @@ -495,8 +655,8 @@ private static ContainerReportsProto getContainerReportsProto( .setOriginNodeId(originNodeId) .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2") .setSize(5368709120L) - .setUsed(2000000000L) - .setKeyCount(100000000L) + .setUsed(usedBytes) + .setKeyCount(keyCount) .setReadCount(100000000L) .setWriteCount(100000000L) .setReadBytes(2000000000L) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index efa333dd85e2..1af2f732a342 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -26,10 +26,16 @@ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher .IncrementalContainerReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -37,7 +43,10 @@ import 
org.mockito.Mockito; import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Set; +import java.util.UUID; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; @@ -55,9 +64,18 @@ public class TestIncrementalContainerReportHandler { @Before public void setup() throws IOException { - final ConfigurationSource conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); + final String path = + GenericTestUtils.getTempPath(UUID.randomUUID().toString()); + Path scmPath = Paths.get(path, "scm-meta"); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); this.containerManager = Mockito.mock(ContainerManager.class); - this.nodeManager = Mockito.mock(NodeManager.class); + NetworkTopology clusterMap = new NetworkTopologyImpl(conf); + EventQueue eventQueue = new EventQueue(); + SCMStorageConfig storageConfig = new SCMStorageConfig(conf); + this.nodeManager = + new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap); + this.containerStateManager = new ContainerStateManager(conf); this.publisher = Mockito.mock(EventPublisher.class); @@ -105,6 +123,9 @@ public void testClosingToClosed() throws IOException { final DatanodeDetails datanodeOne = randomDatanodeDetails(); final DatanodeDetails datanodeTwo = randomDatanodeDetails(); final DatanodeDetails datanodeThree = randomDatanodeDetails(); + nodeManager.register(datanodeOne, null, null); + nodeManager.register(datanodeTwo, null, null); + nodeManager.register(datanodeThree, null, null); final Set containerReplicas = getReplicas( container.containerID(), ContainerReplicaProto.State.CLOSING, @@ -139,6 +160,9 @@ public void testClosingToQuasiClosed() throws IOException { final DatanodeDetails datanodeOne = randomDatanodeDetails(); final DatanodeDetails datanodeTwo = randomDatanodeDetails(); final DatanodeDetails datanodeThree = randomDatanodeDetails(); + nodeManager.register(datanodeOne, null, null); + nodeManager.register(datanodeTwo, null, null); + nodeManager.register(datanodeThree, null, null); final Set containerReplicas = getReplicas( container.containerID(), ContainerReplicaProto.State.CLOSING, @@ -174,6 +198,9 @@ public void testQuasiClosedToClosed() throws IOException { final DatanodeDetails datanodeOne = randomDatanodeDetails(); final DatanodeDetails datanodeTwo = randomDatanodeDetails(); final DatanodeDetails datanodeThree = randomDatanodeDetails(); + nodeManager.register(datanodeOne, null, null); + nodeManager.register(datanodeTwo, null, null); + nodeManager.register(datanodeThree, null, null); final Set containerReplicas = getReplicas( container.containerID(), ContainerReplicaProto.State.CLOSING, @@ -212,6 +239,9 @@ public void testDeleteContainer() throws IOException { final DatanodeDetails datanodeOne = randomDatanodeDetails(); final DatanodeDetails datanodeTwo = randomDatanodeDetails(); final DatanodeDetails datanodeThree = randomDatanodeDetails(); + nodeManager.register(datanodeOne, null, null); + nodeManager.register(datanodeTwo, null, null); + nodeManager.register(datanodeThree, null, null); final Set containerReplicas = getReplicas( container.containerID(), ContainerReplicaProto.State.CLOSED, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java new file mode 100644 index 
000000000000..5543be5832b1 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.metadata; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.junit.Test; + +import java.util.UUID; + +/** + * Testing serialization of PipelineID objects to/from RocksDB. + */ +public class TestPipelineIDCodec { + + @Test + public void testPersistingZeroAsUUID() throws Exception { + long leastSigBits = 0x0000_0000_0000_0000L; + long mostSigBits = 0x0000_0000_0000_0000L; + byte[] expected = new byte[] { + b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), + b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00), b(0x00) + }; + + checkPersisting(leastSigBits, mostSigBits, expected); + } + + @Test + public void testPersistingFFAsUUID() throws Exception { + long leastSigBits = 0xFFFF_FFFF_FFFF_FFFFL; + long mostSigBits = 0xFFFF_FFFF_FFFF_FFFFL; + byte[] expected = new byte[] { + b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), + b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF), b(0xFF) + }; + + checkPersisting(leastSigBits, mostSigBits, expected); + } + + @Test + public void testPersistingARandomUUID() throws Exception { + for (int i=0; i<100; i++) { + UUID uuid = UUID.randomUUID(); + + long mask = 0x0000_0000_0000_00FFL; + + byte[] expected = new byte[] { + b(((int) (uuid.getMostSignificantBits() >> 56 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 48 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 40 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 32 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 24 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 16 & mask))), + b(((int) (uuid.getMostSignificantBits() >> 8 & mask))), + b(((int) (uuid.getMostSignificantBits() & mask))), + + b(((int) (uuid.getLeastSignificantBits() >> 56 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 48 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 40 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 32 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 24 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 16 & mask))), + b(((int) (uuid.getLeastSignificantBits() >> 8 & mask))), + b(((int) (uuid.getLeastSignificantBits() & mask))), + }; + + checkPersisting( + uuid.getMostSignificantBits(), + uuid.getLeastSignificantBits(), + expected + ); + } + } + + @Test + public void testConvertAndReadBackZeroAsUUID() throws Exception { + long mostSigBits = 0x0000_0000_0000_0000L; + 
long leastSigBits = 0x0000_0000_0000_0000L; + UUID uuid = new UUID(mostSigBits, leastSigBits); + PipelineID pid = PipelineID.valueOf(uuid); + + byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid); + PipelineID decoded = new PipelineIDCodec().fromPersistedFormat(encoded); + + assertEquals(pid, decoded); + } + + @Test + public void testConvertAndReadBackFFAsUUID() throws Exception { + long mostSigBits = 0xFFFF_FFFF_FFFF_FFFFL; + long leastSigBits = 0xFFFF_FFFF_FFFF_FFFFL; + UUID uuid = new UUID(mostSigBits, leastSigBits); + PipelineID pid = PipelineID.valueOf(uuid); + + byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid); + PipelineID decoded = new PipelineIDCodec().fromPersistedFormat(encoded); + + assertEquals(pid, decoded); + } + + @Test + public void testConvertAndReadBackRandomUUID() throws Exception { + UUID uuid = UUID.randomUUID(); + PipelineID pid = PipelineID.valueOf(uuid); + + byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid); + PipelineID decoded = new PipelineIDCodec().fromPersistedFormat(encoded); + + assertEquals(pid, decoded); + } + + private void checkPersisting( + long mostSigBits, long leastSigBits, byte[] expected + ) throws Exception { + UUID uuid = new UUID(mostSigBits, leastSigBits); + PipelineID pid = PipelineID.valueOf(uuid); + + byte[] encoded = new PipelineIDCodec().toPersistedFormat(pid); + + assertArrayEquals(expected, encoded); + } + + private byte b(int i) { + return (byte) (i & 0x0000_00FF); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index 6a6d3284465b..f05be767e717 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.node; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import java.io.File; @@ -162,7 +163,9 @@ public void testOnMessage() throws Exception { LambdaTestUtils.await(120000, 1000, () -> { pipelineManager.triggerPipelineCreation(); - return pipelineManager.getPipelines(RATIS, THREE).size() == 3; + System.out.println(pipelineManager.getPipelines(RATIS, THREE).size()); + System.out.println(pipelineManager.getPipelines(RATIS, ONE).size()); + return pipelineManager.getPipelines(RATIS, THREE).size() > 3; }); TestUtils.openAllRatisPipelines(pipelineManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java index 7b4d841fe76f..69b031c552f1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode; 
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.Event; @@ -56,9 +57,9 @@ public void resetEventCollector() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class); Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1"); + NetworkTopology clusterMap = new NetworkTopologyImpl(conf); nodeManager = - new SCMNodeManager(conf, storageConfig, new EventQueue(), Mockito.mock( - NetworkTopology.class)); + new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap); nodeReportHandler = new NodeReportHandler(nodeManager); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index b167a38b7254..7a58d46ab68e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -848,11 +848,12 @@ public void testScmStatsFromNodeReport() final long capacity = 2000; final long used = 100; final long remaining = capacity - used; - + List dnList = new ArrayList<>(nodeCount); try (SCMNodeManager nodeManager = createNodeManager(conf)) { EventQueue eventQueue = (EventQueue) scm.getEventQueue(); for (int x = 0; x < nodeCount; x++) { DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dnList.add(dn); UUID dnId = dn.getUuid(); long free = capacity - used; String storagePath = testDir.getAbsolutePath() + "/" + dnId; @@ -871,9 +872,57 @@ public void testScmStatsFromNodeReport() .getScmUsed().get()); assertEquals(remaining * nodeCount, (long) nodeManager.getStats() .getRemaining().get()); + assertEquals(1, nodeManager.getNumHealthyVolumes(dnList)); + dnList.clear(); } } + /** + * Test multiple nodes sending initial heartbeat with their node report + * with multiple volumes. + * + * @throws IOException + * @throws InterruptedException + * @throws TimeoutException + */ + @Test + public void tesVolumeInfoFromNodeReport() + throws IOException, InterruptedException, AuthenticationException { + OzoneConfiguration conf = getConf(); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, + MILLISECONDS); + final int volumeCount = 10; + final long capacity = 2000; + final long used = 100; + List dnList = new ArrayList<>(1); + try (SCMNodeManager nodeManager = createNodeManager(conf)) { + EventQueue eventQueue = (EventQueue) scm.getEventQueue(); + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dnList.add(dn); + UUID dnId = dn.getUuid(); + long free = capacity - used; + List reports = new ArrayList<>(volumeCount); + boolean failed = true; + for (int x = 0; x < volumeCount; x++) { + String storagePath = testDir.getAbsolutePath() + "/" + dnId; + reports.add(TestUtils + .createStorageReport(dnId, storagePath, capacity, + used, free, null, failed)); + failed = !failed; + } + nodeManager.register(dn, TestUtils.createNodeReport(reports), null); + nodeManager.processHeartbeat(dn); + //TODO: wait for EventQueue to be processed + eventQueue.processAll(8000L); + + assertEquals(1, nodeManager.getNodeCount(HEALTHY)); + assertEquals(volumeCount / 2, + nodeManager.getNumHealthyVolumes(dnList)); + dnList.clear(); + } + } + + /** * Test single node stat update based on nodereport from different heartbeat * status (healthy, stale and dead). 
@@ -1133,6 +1182,8 @@ public void testScmRegisterNodeWith4LayerNetworkTopology() List nodeList = nodeManager.getAllNodes(); nodeList.stream().forEach(node -> Assert.assertTrue(node.getNetworkLocation().startsWith("/rack1/ng"))); + nodeList.stream().forEach(node -> + Assert.assertTrue(node.getParent() != null)); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index 8d6a28cc2a0d..1274608c39c2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -44,7 +44,9 @@ import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -439,6 +441,85 @@ public void testValidatePlacementPolicySingleRackInCluster() { assertEquals(0, status.misReplicationCount()); } + @Test + public void test3NodesInSameRackReturnedWhenOnlyOneHealthyRackIsPresent() + throws Exception { + List dns = setupSkewedRacks(); + + int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); + // Set the only node on rack1 stale. This makes the cluster effectively a + // single rack. + nodeManager.setNodeState(dns.get(0), HddsProtos.NodeState.STALE); + + // As there is only 1 rack alive, the 3 DNs on /rack2 should be returned + List pickedDns = placementPolicy.chooseDatanodes( + new ArrayList<>(), new ArrayList<>(), nodesRequired, 0); + + assertEquals(3, pickedDns.size()); + assertTrue(pickedDns.contains(dns.get(1))); + assertTrue(pickedDns.contains(dns.get(2))); + assertTrue(pickedDns.contains(dns.get(3))); + } + + @Rule + public ExpectedException thrownExp = ExpectedException.none(); + + @Test + public void testExceptionIsThrownWhenRackAwarePipelineCanNotBeCreated() + throws Exception { + thrownExp.expect(SCMException.class); + thrownExp.expectMessage(PipelinePlacementPolicy.MULTIPLE_RACK_PIPELINE_MSG); + + List dns = setupSkewedRacks(); + + // Set the first node to its pipeline limit. This means there are only + // 3 hosts on a single rack available for new pipelines + insertHeavyNodesIntoNodeManager(dns, 1); + int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); + + placementPolicy.chooseDatanodes( + new ArrayList<>(), new ArrayList<>(), nodesRequired, 0); + } + + @Test + public void testExceptionThrownRackAwarePipelineCanNotBeCreatedExcludedNode() + throws Exception { + thrownExp.expect(SCMException.class); + thrownExp.expectMessage(PipelinePlacementPolicy.MULTIPLE_RACK_PIPELINE_MSG); + + List dns = setupSkewedRacks(); + + // Set the first node to its pipeline limit. 
This means there are only + // 3 hosts on a single rack available for new pipelines + insertHeavyNodesIntoNodeManager(dns, 1); + int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); + + List excluded = new ArrayList<>(); + excluded.add(dns.get(0)); + placementPolicy.chooseDatanodes( + excluded, new ArrayList<>(), nodesRequired, 0); + } + + private List setupSkewedRacks() { + cluster = initTopology(); + + List dns = new ArrayList<>(); + dns.add(MockDatanodeDetails + .createDatanodeDetails("host1", "/rack1")); + dns.add(MockDatanodeDetails + .createDatanodeDetails("host2", "/rack2")); + dns.add(MockDatanodeDetails + .createDatanodeDetails("host3", "/rack2")); + dns.add(MockDatanodeDetails + .createDatanodeDetails("host4", "/rack2")); + + nodeManager = new MockNodeManager(cluster, dns, + false, PIPELINE_PLACEMENT_MAX_NODES_COUNT); + placementPolicy = new PipelinePlacementPolicy( + nodeManager, stateManager, conf); + return dns; + } + private boolean checkDuplicateNodesUUID(List nodes) { HashSet uuids = nodes.stream(). map(DatanodeDetails::getUuid). diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index 7c2f17e85840..25957d8d28d1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -21,9 +21,14 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; import java.util.Set; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; @@ -37,11 +42,15 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.metadata.PipelineIDCodec; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.Table.KeyValue; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; @@ -50,13 +59,24 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; + +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.junit.After; import org.junit.Assert; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import org.junit.Before; import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.mockito.InOrder; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static 
org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.slf4j.event.Level.INFO; /** * Test cases to verify PipelineManager. @@ -285,6 +305,8 @@ public void testPipelineCreationFailedMetric() throws Exception { "NumPipelineCreationFailed", metrics); Assert.assertEquals(0, numPipelineCreateFailed); + LogCapturer logs = LogCapturer.captureLogs(SCMPipelineManager.getLog()); + GenericTestUtils.setLogLevel(SCMPipelineManager.getLog(), INFO); //This should fail... try { pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, @@ -294,6 +316,10 @@ public void testPipelineCreationFailedMetric() throws Exception { // pipeline creation failed this time. Assert.assertEquals(SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE, ioe.getResult()); + Assert.assertFalse(logs.getOutput().contains( + "Failed to create pipeline of type")); + } finally { + logs.stopCapturing(); } metrics = getMetrics( @@ -539,6 +565,182 @@ public void testSafeModeUpdatedOnSafemodeExit() pipelineManager.close(); } + /** + * This test was created for HDDS-3925 to check whether the db handling is + * proper at the SCMPipelineManager level. We should remove this test + * when we remove the key swap from the SCMPipelineManager code. + * + * The test emulates internally the values that the iterator will provide + * back to the check-fix code path. The iterator internally deserialize the + * key stored in RocksDB using the PipelineIDCodec. The older version of the + * codec serialized the PipelineIDs by taking the byte[] representation of + * the protobuf representation of the PipelineID, and deserialization was not + * implemented. + * + * In order to be able to check and fix the change, the deserialization was + * introduced, and deserialisation of the old protobuf byte representation + * with the new deserialization logic of the keys are + * checked against the PipelineID serialized in the value as well via + * protobuf. + * The DB is storing the keys now based on a byte[] serialized from the UUID + * inside the PipelineID. + * For this we emulate the getKey of the KeyValue returned by the + * iterator to return a PipelineID that is deserialized from the byte[] + * representation of the protobuf representation of the PipelineID in the + * test, as that would be the value we get from the iterator when iterating + * through a table with the old key format. 
+ * + * @throws Exception when something goes wrong + */ + @Test + public void testPipelineDBKeyFormatChange() throws Exception { + Pipeline p1 = pipelineStub(); + Pipeline p2 = pipelineStub(); + Pipeline p3 = pipelineStub(); + + TableIterator> iteratorMock = + mock(TableIterator.class); + + KeyValue kv1 = + mockKeyValueToProvideOldKeyFormat(p1); + KeyValue kv2 = + mockKeyValueToProvideNormalFormat(p2); + KeyValue kv3 = + mockKeyValueToProvideOldKeyFormat(p3); + + when(iteratorMock.next()) + .thenReturn(kv1, kv2, kv3) + .thenThrow(new NoSuchElementException()); + when(iteratorMock.hasNext()) + .thenReturn(true, true, true, false); + + Table pipelineStore = mock(Table.class); + doReturn(iteratorMock).when(pipelineStore).iterator(); + when(pipelineStore.isEmpty()).thenReturn(false); + + InOrder inorderVerifier = inOrder(pipelineStore, iteratorMock); + + new SCMPipelineManager(conf, nodeManager, pipelineStore, new EventQueue()); + + inorderVerifier.verify(iteratorMock).removeFromDB(); + inorderVerifier.verify(pipelineStore).put(p1.getId(), p1); + inorderVerifier.verify(iteratorMock).removeFromDB(); + inorderVerifier.verify(pipelineStore).put(p3.getId(), p3); + + verify(pipelineStore, never()).put(p2.getId(), p2); + } + + @Test + public void testScmWithPipelineDBKeyFormatChange() throws Exception { + TemporaryFolder tempDir = new TemporaryFolder(); + tempDir.create(); + File dir = tempDir.newFolder(); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); + + SCMMetadataStore scmDbWithOldKeyFormat = null; + Map oldPipelines = new HashMap<>(); + try { + scmDbWithOldKeyFormat = + new TestSCMStoreImplWithOldPipelineIDKeyFormat(conf); + // Create 3 pipelines. + for (int i = 0; i < 3; i++) { + Pipeline pipeline = pipelineStub(); + scmDbWithOldKeyFormat.getPipelineTable() + .put(pipeline.getId(), pipeline); + oldPipelines.put(pipeline.getId().getId(), pipeline); + } + } finally { + if (scmDbWithOldKeyFormat != null) { + scmDbWithOldKeyFormat.stop(); + } + } + + LogCapturer logCapturer = + LogCapturer.captureLogs(SCMPipelineManager.getLog()); + + // Create SCMPipelineManager with new DBDefinition. + SCMMetadataStore newScmMetadataStore = null; + try { + newScmMetadataStore = new SCMMetadataStoreImpl(conf); + SCMPipelineManager pipelineManager = new SCMPipelineManager(conf, + nodeManager, + newScmMetadataStore.getPipelineTable(), + new EventQueue()); + + waitForLog(logCapturer); + assertEquals(3, pipelineManager.getPipelines().size()); + oldPipelines.values().forEach(p -> + pipelineManager.containsPipeline(p.getId())); + } finally { + newScmMetadataStore.stop(); + } + + // Mimicking another restart. 
+ try { + logCapturer.clearOutput(); + newScmMetadataStore = new SCMMetadataStoreImpl(conf); + SCMPipelineManager pipelineManager = new SCMPipelineManager(conf, + nodeManager, + newScmMetadataStore.getPipelineTable(), + new EventQueue()); + try { + waitForLog(logCapturer); + Assert.fail("Unexpected log: " + logCapturer.getOutput()); + } catch (TimeoutException ex) { + Assert.assertTrue(ex.getMessage().contains("Timed out")); + } + assertEquals(3, pipelineManager.getPipelines().size()); + oldPipelines.values().forEach(p -> + pipelineManager.containsPipeline(p.getId())); + } finally { + newScmMetadataStore.stop(); + } + } + + private static void waitForLog(LogCapturer logCapturer) + throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor(() -> logCapturer.getOutput() + .contains("Found pipeline in old format key"), + 1000, 5000); + } + + private Pipeline pipelineStub() { + return Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setType(HddsProtos.ReplicationType.RATIS) + .setFactor(HddsProtos.ReplicationFactor.ONE) + .setState(Pipeline.PipelineState.OPEN) + .setNodes( + Arrays.asList( + nodeManager.getNodes(HddsProtos.NodeState.HEALTHY).get(0) + ) + ) + .setNodesInOrder(Arrays.asList(0)) + .build(); + } + + private KeyValue + mockKeyValueToProvideOldKeyFormat(Pipeline pipeline) + throws IOException { + KeyValue kv = mock(KeyValue.class); + when(kv.getValue()).thenReturn(pipeline); + when(kv.getKey()) + .thenReturn( + new PipelineIDCodec().fromPersistedFormat( + pipeline.getId().getProtobuf().toByteArray() + )); + return kv; + } + + private KeyValue + mockKeyValueToProvideNormalFormat(Pipeline pipeline) + throws IOException { + KeyValue kv = mock(KeyValue.class); + when(kv.getValue()).thenReturn(pipeline); + when(kv.getKey()).thenReturn(pipeline.getId()); + return kv; + } + private void sendPipelineReport(DatanodeDetails dn, Pipeline pipeline, PipelineReportHandler pipelineReportHandler, boolean isLeader, EventQueue eventQueue) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java new file mode 100644 index 000000000000..a04ecea75041 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline; + +import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.PIPELINES; + +import java.io.IOException; +import java.math.BigInteger; +import java.security.cert.X509Certificate; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.metadata.PipelineCodec; +import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; +import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore; +import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; +import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; + +/** + * Test SCM Metadata Store that has ONLY the pipeline table whose key uses the + * old codec format. + */ +public class TestSCMStoreImplWithOldPipelineIDKeyFormat + implements SCMMetadataStore { + + private DBStore store; + private final OzoneConfiguration configuration; + private Table pipelineTable; + + public TestSCMStoreImplWithOldPipelineIDKeyFormat( + OzoneConfiguration config) throws IOException { + this.configuration = config; + start(configuration); + } + + @Override + public void start(OzoneConfiguration config) + throws IOException { + if (this.store == null) { + this.store = DBStoreBuilder.createDBStore(config, + new SCMDBTestDefinition()); + pipelineTable = PIPELINES.getTable(store); + } + } + + @Override + public void stop() throws Exception { + if (store != null) { + store.close(); + store = null; + } + } + + @Override + public DBStore getStore() { + return null; + } + + @Override + public Table getDeletedBlocksTXTable() { + return null; + } + + @Override + public Long getCurrentTXID() { + return null; + } + + @Override + public Long getNextDeleteBlockTXID() { + return null; + } + + @Override + public Table getValidCertsTable() { + return null; + } + + @Override + public Table getRevokedCertsTable() { + return null; + } + + @Override + public TableIterator getAllCerts(CertificateStore.CertType certType) { + return null; + } + + @Override + public Table getPipelineTable() { + return pipelineTable; + } + + @Override + public BatchOperationHandler getBatchHandler() { + return null; + } + + @Override + public Table getContainerTable() { + return null; + } + + /** + * Test SCM DB Definition for the above class. 
+ */ + public static class SCMDBTestDefinition implements DBDefinition { + + public static final DBColumnFamilyDefinition + PIPELINES = + new DBColumnFamilyDefinition<>( + "pipelines", + PipelineID.class, + new OldPipelineIDCodec(), + Pipeline.class, + new PipelineCodec()); + + @Override + public String getName() { + return "scm.db"; + } + + @Override + public String getLocationConfigKey() { + return ScmConfigKeys.OZONE_SCM_DB_DIRS; + } + + @Override + public DBColumnFamilyDefinition[] getColumnFamilies() { + return new DBColumnFamilyDefinition[] {PIPELINES}; + } + } + + /** + * Old Pipeline ID codec that relies on protobuf serialization. + */ + public static class OldPipelineIDCodec implements Codec { + @Override + public byte[] toPersistedFormat(PipelineID object) throws IOException { + return object.getProtobuf().toByteArray(); + } + + @Override + public PipelineID fromPersistedFormat(byte[] rawData) throws IOException { + return null; + } + + @Override + public PipelineID copyObject(PipelineID object) { + throw new UnsupportedOperationException(); + } + } + +} + diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java new file mode 100644 index 000000000000..804c5bb98a3f --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; +import org.apache.hadoop.hdds.scm.PipelineRequestInformation; +import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.PipelineChoosePolicyFactory.OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT; + +/** + * Test for scm pipeline choose policy factory. + */ +public class TestPipelineChoosePolicyFactory { + + private OzoneConfiguration conf; + + private ScmConfig scmConfig; + + @Before + public void setup() { + //initialize network topology instance + conf = new OzoneConfiguration(); + scmConfig = conf.getObject(ScmConfig.class); + } + + @Test + public void testDefaultPolicy() throws IOException { + PipelineChoosePolicy policy = PipelineChoosePolicyFactory + .getPolicy(conf); + Assert.assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, + policy.getClass()); + } + + + /** + * A dummy pipeline choose policy implementation for test. + */ + public static class DummyImpl implements PipelineChoosePolicy { + + public DummyImpl(String dummy) { + } + + @Override + public Pipeline choosePipeline(List pipelineList, + PipelineRequestInformation pri) { + return null; + } + } + + @Test + public void testConstuctorNotFound() throws SCMException { + // set a policy class which does't have the right constructor implemented + scmConfig.setPipelineChoosePolicyName(DummyImpl.class.getName()); + PipelineChoosePolicy policy = PipelineChoosePolicyFactory.getPolicy(conf); + Assert.assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, + policy.getClass()); + } + + @Test + public void testClassNotImplemented() throws SCMException { + // set a placement class not implemented + conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, + "org.apache.hadoop.hdds.scm.pipeline.choose.policy.HelloWorld"); + PipelineChoosePolicy policy = PipelineChoosePolicyFactory.getPolicy(conf); + Assert.assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, + policy.getClass()); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 9ca3f18c0c75..a9b879f86ec7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -343,4 +343,9 @@ public List getNodesByAddress(String address) { public NetworkTopology getClusterNetworkTopologyMap() { return null; } + + @Override + public int getNumHealthyVolumes(List dnList) { + return 0; + } } diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java index 3625e3475d4a..7576e8babd45 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; @@ -45,7 +45,6 @@ import static org.junit.Assert.assertEquals; import org.junit.BeforeClass; import org.junit.Test; -import org.mockito.Mockito; /** * Test cases to verify the metrics exposed by SCMNodeManager. @@ -64,7 +63,7 @@ public static void setup() throws Exception { SCMStorageConfig config = new SCMStorageConfig(NodeType.DATANODE, new File("/tmp"), "storage"); nodeManager = new SCMNodeManager(source, config, publisher, - Mockito.mock(NetworkTopology.class)); + new NetworkTopologyImpl(source)); registeredDatanode = DatanodeDetails.newBuilder() .setHostName("localhost") diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index 831a22b1275c..ceed47ff4513 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-test-utils - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Test Utils Apache Hadoop HDDS Test Utils jar diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index f362a0bfea61..dfda5a668fdd 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-hdds-tools - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Distributed Data Store Tools Apache Hadoop HDDS Tools jar @@ -66,6 +66,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-cli commons-cli + + log4j + log4j + + + org.kohsuke.metainf-services + metainf-services + org.xerial sqlite-jdbc diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java new file mode 100644 index 000000000000..aca8a4cf8d79 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.util.NativeCodeLoader; + +import org.apache.log4j.ConsoleAppender; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import picocli.CommandLine; + +/** + * Ozone Admin Command line tool. + */ +@CommandLine.Command(name = "ozone admin", + hidden = true, + description = "Developer tools for Ozone Admin operations", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneAdmin extends GenericCli { + + private OzoneConfiguration ozoneConf; + + public OzoneAdmin() { + super(OzoneAdmin.class); + } + + public OzoneConfiguration getOzoneConf() { + if (ozoneConf == null) { + ozoneConf = createOzoneConfiguration(); + } + return ozoneConf; + } + + /** + * Main for the Ozone Admin shell Command handling. + * + * @param argv - System Args Strings[] + */ + public static void main(String[] argv) { + LogManager.resetConfiguration(); + Logger.getRootLogger().setLevel(Level.INFO); + Logger.getRootLogger() + .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); + Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); + + new OzoneAdmin().run(argv); + } +} diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/package-info.java similarity index 84% rename from hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep rename to hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/package-info.java index ff1232e5fcaa..82fbd722932e 100644 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/package-info.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,12 +6,17 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
+ * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - */ \ No newline at end of file + */ + +/** + * Command-line tools for HDDS. + */ +package org.apache.hadoop.hdds.cli; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java index fcb9ad6b2f6a..cd5aba3a82e2 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Spec; /** @@ -41,21 +42,21 @@ ReplicationManagerStopSubcommand.class, ReplicationManagerStatusSubcommand.class }) -public class ReplicationManagerCommands implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class ReplicationManagerCommands implements Callable, + SubcommandWithParent { @Spec private CommandSpec spec; - @ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java index 1adec6b0c4b4..ff82b82ec87a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,32 +22,25 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; -import java.util.concurrent.Callable; +import java.io.IOException; /** - * This is the handler that process safe mode check command. + * Handler to start replication manager. 
*/ @Command( name = "start", description = "Start ReplicationManager", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStartSubcommand implements Callable { +public class ReplicationManagerStartSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ReplicationManagerStartSubcommand.class); - @ParentCommand - private ReplicationManagerCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.startReplicationManager(); - LOG.info("Starting ReplicationManager..."); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.startReplicationManager(); + LOG.info("Starting ReplicationManager..."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java index 2ebf28c80741..c6800befd8cd 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,39 +22,31 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; -import java.util.concurrent.Callable; +import java.io.IOException; /** - * This is the handler that process safe mode check command. + * Handler to query status of replication manager. 
*/ @Command( name = "status", description = "Check if ReplicationManager is running or not", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStatusSubcommand implements Callable { +public class ReplicationManagerStatusSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ReplicationManagerStatusSubcommand.class); - @ParentCommand - private ReplicationManagerCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.getReplicationManagerStatus(); - - // Output data list - if(execReturn){ - LOG.info("ReplicationManager is Running."); - } else { - LOG.info("ReplicationManager is Not Running."); - } - return null; + public void execute(ScmClient scmClient) throws IOException { + boolean execReturn = scmClient.getReplicationManagerStatus(); + + // Output data list + if(execReturn){ + LOG.info("ReplicationManager is Running."); + } else { + LOG.info("ReplicationManager is Not Running."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java index 7cafd01b12d6..7d3063a7636c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,34 +22,27 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; -import java.util.concurrent.Callable; +import java.io.IOException; /** - * This is the handler that process safe mode check command. + * Handler to stop replication manager. 
*/ @Command( name = "stop", description = "Stop ReplicationManager", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStopSubcommand implements Callable { +public class ReplicationManagerStopSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ReplicationManagerStopSubcommand.class); - @ParentCommand - private ReplicationManagerCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.stopReplicationManager(); - LOG.info("Stopping ReplicationManager..."); - LOG.info("Requested SCM to stop ReplicationManager, " + - "it might take sometime for the ReplicationManager to stop."); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.stopReplicationManager(); + LOG.info("Stopping ReplicationManager..."); + LOG.info("Requested SCM to stop ReplicationManager, " + + "it might take sometime for the ReplicationManager to stop."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java index b2cfea3daaaa..ba359af1c59b 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdds.scm.cli; +import java.io.IOException; import java.util.Map; -import java.util.concurrent.Callable; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.HddsVersionProvider; @@ -28,7 +28,6 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process safe mode check command. 
@@ -38,39 +37,32 @@ description = "Check if SCM is in safe mode", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class SafeModeCheckSubcommand implements Callable { +public class SafeModeCheckSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(SafeModeCheckSubcommand.class); - @ParentCommand - private SafeModeCommands parent; - @CommandLine.Option(names = {"--verbose"}, description = "Show detailed status of rules.") private boolean verbose; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.inSafeMode(); + public void execute(ScmClient scmClient) throws IOException { + boolean execReturn = scmClient.inSafeMode(); - // Output data list - if(execReturn){ - LOG.info("SCM is in safe mode."); - if (verbose) { - for (Map.Entry> entry : - scmClient.getSafeModeRuleStatuses().entrySet()) { - Pair value = entry.getValue(); - LOG.info("validated:{}, {}, {}", - value.getLeft(), entry.getKey(), value.getRight()); - } + // Output data list + if(execReturn){ + LOG.info("SCM is in safe mode."); + if (verbose) { + for (Map.Entry> entry : + scmClient.getSafeModeRuleStatuses().entrySet()) { + Pair value = entry.getValue(); + LOG.info("validated:{}, {}, {}", + value.getLeft(), entry.getKey(), value.getRight()); } - } else { - LOG.info("SCM is out of safe mode."); } - return null; + } else { + LOG.info("SCM is out of safe mode."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java index 017e1ba3c2c7..6ba7cf295470 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Spec; /** @@ -43,24 +42,20 @@ SafeModeExitSubcommand.class, SafeModeWaitSubcommand.class }) -public class SafeModeCommands implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeCommands.class); +@MetaInfServices(SubcommandWithParent.class) +public class SafeModeCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; - @ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java index 9f1db45bb4e2..12490c5c2c51 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm.cli; -import java.util.concurrent.Callable; +import java.io.IOException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -25,7 +25,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process safe mode exit command. 
@@ -35,23 +34,16 @@ description = "Force SCM out of safe mode", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class SafeModeExitSubcommand implements Callable { +public class SafeModeExitSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(SafeModeExitSubcommand.class); - @ParentCommand - private SafeModeCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.forceExitSafeMode(); - if(execReturn){ - LOG.info("SCM exit safe mode successfully."); - } - return null; + public void execute(ScmClient scmClient) throws IOException { + boolean execReturn = scmClient.forceExitSafeMode(); + if(execReturn){ + LOG.info("SCM exit safe mode successfully."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java index 7668a47d24d2..e3fb5c1e718e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,7 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; +import picocli.CommandLine.Mixin; /** * This is the handler that process safe mode wait command. @@ -45,21 +45,20 @@ public class SafeModeWaitSubcommand implements Callable { @Option(description = "Define timeout (in second) to wait until (exit code 1) " + "or until safemode is ended (exit code 0).", defaultValue = "30", - required = false, names = { - "-t", "--timeout"}) + names = { "-t", "--timeout"}) private long timeoutSeconds; private long startTestTime; - @ParentCommand - private SafeModeCommands parent; + @Mixin + private ScmOption scmOption; @Override public Void call() throws Exception { startTestTime = System.currentTimeMillis(); while (getRemainingTimeInSec() > 0) { - try (ScmClient scmClient = parent.getParent().createScmClient()) { + try (ScmClient scmClient = scmOption.createScmClient()) { while (getRemainingTimeInSec() > 0) { boolean isSafeModeActive = scmClient.inSafeMode(); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java new file mode 100644 index 000000000000..5b8b81436c78 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.cli.GenericParentCommand; +import org.apache.hadoop.hdds.conf.MutableConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import picocli.CommandLine; + +import java.io.IOException; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static picocli.CommandLine.Spec.Target.MIXEE; + +/** + * Defines command-line option for SCM address. + */ +public class ScmOption { + + @CommandLine.Spec(MIXEE) + private CommandLine.Model.CommandSpec spec; + + @CommandLine.Option(names = {"--scm"}, + description = "The destination scm (host:port)") + private String scm; + + public ScmClient createScmClient() { + try { + GenericParentCommand parent = (GenericParentCommand) + spec.root().userObject(); + OzoneConfiguration conf = parent.createOzoneConfiguration(); + checkAndSetSCMAddressArg(conf); + + return new ContainerOperationClient(conf); + } catch (IOException ex) { + throw new IllegalArgumentException("Can't create SCM client", ex); + } + } + + private void checkAndSetSCMAddressArg(MutableConfigurationSource conf) { + if (StringUtils.isNotEmpty(scm)) { + conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm); + } + if (!HddsUtils.getHostNameFromConfigKeys(conf, + ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) { + + throw new IllegalArgumentException( + ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + + " should be set in ozone-site.xml or with the --scm option"); + } + } + +} diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java similarity index 60% rename from hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java rename to hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java index efd6092a0b16..6dc09c2cbecd 100644 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,24 +15,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.upgrade; +package org.apache.hadoop.hdds.scm.cli; -import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import picocli.CommandLine; -import picocli.CommandLine.Command; +import java.io.IOException; +import java.util.concurrent.Callable; /** - * Command to calculate statistics and estimate the upgrade. + * Base class for admin commands that connect via SCM client. */ -@Command(name = "plan", - description = "Plan existing HDFS block distribution and give." 
- + "estimation.") -public class Plan implements Callable { +public abstract class ScmSubcommand implements Callable { + + @CommandLine.Mixin + private ScmOption scmOption; + + protected abstract void execute(ScmClient client) throws IOException; @Override - public Void call() throws Exception { - System.err.println("[In-Place upgrade : plan] is not yet supported."); - return null; + public final Void call() throws Exception { + try (ScmClient scmClient = scmOption.createScmClient()) { + execute(scmClient); + return null; + } } - } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java index 214da34561b5..c1aebaeec22f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,18 +18,19 @@ package org.apache.hadoop.hdds.scm.cli; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.TreeSet; -import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; import org.apache.hadoop.hdds.scm.client.ScmClient; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; @@ -37,9 +38,9 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; + +import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Handler of printTopology command. 
@@ -49,22 +50,18 @@ description = "Print a tree of the network topology as reported by SCM", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class TopologySubcommand implements Callable { - - @Spec - private CommandSpec spec; - - @CommandLine.ParentCommand - private WithScmClient parent; +@MetaInfServices(SubcommandWithParent.class) +public class TopologySubcommand extends ScmSubcommand + implements SubcommandWithParent { - private static List stateArray = new ArrayList<>(); + private static final List STATES = new ArrayList<>(); static { - stateArray.add(HEALTHY); - stateArray.add(STALE); - stateArray.add(DEAD); - stateArray.add(DECOMMISSIONING); - stateArray.add(DECOMMISSIONED); + STATES.add(HEALTHY); + STATES.add(STALE); + STATES.add(DEAD); + STATES.add(DECOMMISSIONING); + STATES.add(DECOMMISSIONED); } @CommandLine.Option(names = {"-o", "--order"}, @@ -76,22 +73,24 @@ public class TopologySubcommand implements Callable { private boolean fullInfo; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.createScmClient()) { - for (HddsProtos.NodeState state : stateArray) { - List nodes = scmClient.queryNode(state, - HddsProtos.QueryScope.CLUSTER, ""); - if (nodes != null && nodes.size() > 0) { - // show node state - System.out.println("State = " + state.toString()); - if (order) { - printOrderedByLocation(nodes); - } else { - printNodesWithLocation(nodes); - } + public Class getParentType() { + return OzoneAdmin.class; + } + + @Override + protected void execute(ScmClient scmClient) throws IOException { + for (HddsProtos.NodeState state : STATES) { + List nodes = scmClient.queryNode(state, + HddsProtos.QueryScope.CLUSTER, ""); + if (nodes != null && !nodes.isEmpty()) { + // show node state + System.out.println("State = " + state.toString()); + if (order) { + printOrderedByLocation(nodes); + } else { + printNodesWithLocation(nodes); } } - return null; } } @@ -124,7 +123,7 @@ private String formatPortOutput(List ports) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < ports.size(); i++) { HddsProtos.Port port = ports.get(i); - sb.append(port.getName() + "=" + port.getValue()); + sb.append(port.getName()).append("=").append(port.getValue()); if (i < ports.size() - 1) { sb.append(","); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java index cd81d32b8a82..53cbd2f63da3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,15 @@ */ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; +import java.io.IOException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import static org.apache.hadoop.hdds.scm.cli.container.ContainerCommands.checkContainerExists; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; /** * The handler of close container command. 
@@ -35,21 +35,15 @@ description = "close container", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class CloseSubcommand implements Callable { - - @ParentCommand - private ContainerCommands parent; +public class CloseSubcommand extends ScmSubcommand { @Parameters(description = "Id of the container to close") private long containerId; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - checkContainerExists(scmClient, containerId); - scmClient.closeContainer(containerId); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + checkContainerExists(scmClient, containerId); + scmClient.closeContainer(containerId); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index cf665b008f72..de1015d141e7 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,12 +22,14 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Spec; /** @@ -45,24 +47,23 @@ CreateSubcommand.class, CloseSubcommand.class }) -public class ContainerCommands implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class ContainerCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; - @ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + @Override + public Class getParentType() { + return OzoneAdmin.class; + } + public static void checkContainerExists(ScmClient scmClient, long containerId) throws IOException { ContainerInfo container = scmClient.getContainer(containerId); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java index eb79e50506e2..9eedbf858958 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; +import java.io.IOException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; @@ -28,7 +29,6 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process container creation command. @@ -38,27 +38,19 @@ description = "Create container", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class CreateSubcommand implements Callable { +public class CreateSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(CreateSubcommand.class); - @ParentCommand - private ContainerCommands parent; - @Option(description = "Owner of the new container", defaultValue = "OZONE", - required = false, names = { - "-o", "--owner"}) - + names = { "-o", "--owner"}) private String owner; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - ContainerWithPipeline container = scmClient.createContainer(owner); - LOG.info("Container {} is created.", - container.getContainerInfo().getContainerID()); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + ContainerWithPipeline container = scmClient.createContainer(owner); + LOG.info("Container {} is created.", + container.getContainerInfo().getContainerID()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java index a438fe906131..62d1b8ab2ae3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +18,16 @@ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; +import java.io.IOException; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import static org.apache.hadoop.hdds.scm.cli.container.ContainerCommands.checkContainerExists; import picocli.CommandLine.Command; import picocli.CommandLine.Option; import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process delete container command. 
@@ -37,7 +37,7 @@ description = "Delete container", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class DeleteSubcommand implements Callable { +public class DeleteSubcommand extends ScmSubcommand { @Parameters(description = "Id of the container to close") private long containerId; @@ -46,15 +46,9 @@ public class DeleteSubcommand implements Callable { "--force"}, description = "forcibly delete the container") private boolean force; - @ParentCommand - private ContainerCommands parent; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - checkContainerExists(scmClient, containerId); - scmClient.deleteContainer(containerId, force); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + checkContainerExists(scmClient, containerId); + scmClient.deleteContainer(containerId, force); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java index 31e2a45dfc58..5defc2456a42 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,11 +17,12 @@ */ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; +import java.io.IOException; import java.util.stream.Collectors; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; @@ -31,7 +32,6 @@ import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process container info command. @@ -41,36 +41,30 @@ description = "Show information about a specific container", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class InfoSubcommand implements Callable { +public class InfoSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(InfoSubcommand.class); - @ParentCommand - private ContainerCommands parent; - @Parameters(description = "Decimal id of the container.") private long containerID; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - final ContainerWithPipeline container = scmClient. - getContainerWithPipeline(containerID); - Preconditions.checkNotNull(container, "Container cannot be null"); + public void execute(ScmClient scmClient) throws IOException { + final ContainerWithPipeline container = scmClient. + getContainerWithPipeline(containerID); + Preconditions.checkNotNull(container, "Container cannot be null"); - // Print container report info. - LOG.info("Container id: {}", containerID); - LOG.info("Pipeline id: {}", container.getPipeline().getId().getId()); - LOG.info("Container State: {}", container.getContainerInfo().getState()); + // Print container report info. 
+ LOG.info("Container id: {}", containerID); + LOG.info("Pipeline id: {}", container.getPipeline().getId().getId()); + LOG.info("Container State: {}", container.getContainerInfo().getState()); - // Print pipeline of an existing container. - String machinesStr = container.getPipeline().getNodes().stream().map( - InfoSubcommand::buildDatanodeDetails) - .collect(Collectors.joining(",\n")); - LOG.info("Datanodes: [{}]", machinesStr); - return null; - } + // Print pipeline of an existing container. + String machinesStr = container.getPipeline().getNodes().stream().map( + InfoSubcommand::buildDatanodeDetails) + .collect(Collectors.joining(",\n")); + LOG.info("Datanodes: [{}]", machinesStr); } private static String buildDatanodeDetails(DatanodeDetails details) { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index 3ffc118b57be..e9b0b7dc9a50 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +19,9 @@ import java.io.IOException; import java.util.List; -import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -36,7 +36,6 @@ import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; /** * This is the handler that process container list command. 
@@ -46,22 +45,19 @@ description = "List containers", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ListSubcommand implements Callable { +public class ListSubcommand extends ScmSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ListSubcommand.class); - @ParentCommand - private ContainerCommands parent; - @Option(names = {"-s", "--start"}, - description = "Container id to start the iteration", required = false) - private long startId = 0; + description = "Container id to start the iteration") + private long startId; @Option(names = {"-c", "--count"}, description = "Maximum number of containers to list", defaultValue = "20", showDefaultValue = Visibility.ALWAYS) - private int count = 20; + private int count; private static final ObjectWriter WRITER; @@ -83,17 +79,13 @@ private void outputContainerInfo(ContainerInfo containerInfo) } @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - List containerList = - scmClient.listContainer(startId, count); + public void execute(ScmClient scmClient) throws IOException { + List containerList = + scmClient.listContainer(startId, count); - // Output data list - for (ContainerInfo container : containerList) { - outputContainerInfo(container); - } - return null; + // Output data list + for (ContainerInfo container : containerList) { + outputContainerInfo(container); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index b7ba59c77604..7e77c60f6e1e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,10 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -38,21 +40,20 @@ subcommands = { ListInfoSubcommand.class }) -public class DatanodeCommands implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class DatanodeCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; - @CommandLine.ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java index e4060b3dadaf..80c5ecaae820 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,13 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import picocli.CommandLine; import java.io.IOException; import java.util.List; -import java.util.concurrent.Callable; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -39,44 +39,36 @@ description = "List info of datanodes", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ListInfoSubcommand implements Callable { - - @CommandLine.ParentCommand - private DatanodeCommands parent; +public class ListInfoSubcommand extends ScmSubcommand { @CommandLine.Option(names = {"--ip"}, description = "Show info by ip address.", - defaultValue = "", - required = false) + defaultValue = "") private String ipaddress; @CommandLine.Option(names = {"--id"}, description = "Show info by datanode UUID.", - defaultValue = "", - required = false) + defaultValue = "") private String uuid; private List pipelines; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - pipelines = scmClient.listPipelines(); - if (Strings.isNullOrEmpty(ipaddress) && Strings.isNullOrEmpty(uuid)) { - getAllNodes(scmClient).stream().forEach(p -> printDatanodeInfo(p)); - } else { - Stream allNodes = getAllNodes(scmClient).stream(); - if (!Strings.isNullOrEmpty(ipaddress)) { - allNodes = allNodes.filter(p -> p.getIpAddress() - .compareToIgnoreCase(ipaddress) == 0); - } - if (!Strings.isNullOrEmpty(uuid)) { - allNodes = allNodes.filter(p -> 
p.getUuid().toString().equals(uuid)); - } - allNodes.forEach(p -> printDatanodeInfo(p)); + public void execute(ScmClient scmClient) throws IOException { + pipelines = scmClient.listPipelines(); + if (Strings.isNullOrEmpty(ipaddress) && Strings.isNullOrEmpty(uuid)) { + getAllNodes(scmClient).forEach(this::printDatanodeInfo); + } else { + Stream allNodes = getAllNodes(scmClient).stream(); + if (!Strings.isNullOrEmpty(ipaddress)) { + allNodes = allNodes.filter(p -> p.getIpAddress() + .compareToIgnoreCase(ipaddress) == 0); + } + if (!Strings.isNullOrEmpty(uuid)) { + allNodes = allNodes.filter(p -> p.getUuid().toString().equals(uuid)); } - return null; + allNodes.forEach(this::printDatanodeInfo); } } @@ -101,7 +93,7 @@ private void printDatanodeInfo(DatanodeDetails datanode) { " or the node is not in Healthy state."); } else { relatedPipelineNum = relatedPipelines.size(); - relatedPipelines.stream().forEach( + relatedPipelines.forEach( p -> pipelineListInfo.append(p.getId().getId().toString()) .append("/").append(p.getFactor().toString()).append("/") .append(p.getType().toString()).append("/") @@ -118,4 +110,4 @@ private void printDatanodeInfo(DatanodeDetails datanode) { + "/" + datanode.getHostName() + "/" + relatedPipelineNum + " pipelines) \n" + "Related pipelines: \n" + pipelineListInfo); } -} \ No newline at end of file +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java index ec4b1b789e8c..a61655dc66b0 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,11 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; /** * Handler of activate pipeline command. 
@@ -33,20 +34,14 @@ description = "Activates the given Pipeline", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ActivatePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; +public class ActivatePipelineSubcommand extends ScmSubcommand { @CommandLine.Parameters(description = "ID of the pipeline to activate") private String pipelineId; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.activatePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.activatePipeline( + HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java index 89a280e805c0..78b83e56db07 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,11 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; /** * Handler of close pipeline command. @@ -33,20 +34,14 @@ description = "Close pipeline", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ClosePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; +public class ClosePipelineSubcommand extends ScmSubcommand { @CommandLine.Parameters(description = "ID of the pipeline to close") private String pipelineId; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.closePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.closePipeline( + HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java index e0bdddb7797e..c784be88b376 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,12 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; /** * Handler of createPipeline command. @@ -34,44 +35,37 @@ description = "create pipeline", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class CreatePipelineSubcommand implements Callable { - @CommandLine.ParentCommand - private PipelineCommands parent; +public class CreatePipelineSubcommand extends ScmSubcommand { @CommandLine.Option( names = {"-t", "--replicationType"}, description = "Replication type (STAND_ALONE, RATIS)", defaultValue = "STAND_ALONE" ) - private HddsProtos.ReplicationType type - = HddsProtos.ReplicationType.STAND_ALONE; + private HddsProtos.ReplicationType type; @CommandLine.Option( names = {"-f", "--replicationFactor"}, description = "Replication factor (ONE, THREE)", defaultValue = "ONE" ) - private HddsProtos.ReplicationFactor factor - = HddsProtos.ReplicationFactor.ONE; + private HddsProtos.ReplicationFactor factor; @Override - public Void call() throws Exception { + public void execute(ScmClient scmClient) throws IOException { if (type == HddsProtos.ReplicationType.CHAINED) { throw new IllegalArgumentException(type.name() + " is not supported yet."); } - try (ScmClient scmClient = parent.getParent().createScmClient()) { - Pipeline pipeline = scmClient.createReplicationPipeline( - type, - factor, - HddsProtos.NodePool.getDefaultInstance()); + Pipeline pipeline = scmClient.createReplicationPipeline( + type, + factor, + HddsProtos.NodePool.getDefaultInstance()); - if (pipeline != null) { - System.out.println(pipeline.getId().toString() + - " is created. Factor: " + pipeline.getFactor() + - ", Type: " + pipeline.getType()); - } - return null; + if (pipeline != null) { + System.out.println(pipeline.getId().toString() + + " is created. Factor: " + pipeline.getFactor() + + ", Type: " + pipeline.getType()); } } -} \ No newline at end of file +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java index 4f4f741a3647..70df4d91fae9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,11 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; /** * Handler of deactivate pipeline command. 
@@ -33,20 +34,14 @@ description = "Deactivates the given Pipeline", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class DeactivatePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; +public class DeactivatePipelineSubcommand extends ScmSubcommand { @CommandLine.Parameters(description = "ID of the pipeline to deactivate") private String pipelineId; @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.deactivatePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } + public void execute(ScmClient scmClient) throws IOException { + scmClient.deactivatePipeline( + HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java index 729daeae56bc..58ae26e500e1 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,12 @@ import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import picocli.CommandLine; -import java.util.concurrent.Callable; +import java.io.IOException; import java.util.stream.Stream; /** @@ -35,38 +36,29 @@ description = "List all active pipelines", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class ListPipelinesSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; +public class ListPipelinesSubcommand extends ScmSubcommand { @CommandLine.Option(names = {"-ffc", "--filterByFactor"}, description = "Filter listed pipelines by Factor(ONE/one)", - defaultValue = "", - required = false) + defaultValue = "") private String factor; @CommandLine.Option(names = {"-fst", "--filterByState"}, description = "Filter listed pipelines by State(OPEN/CLOSE)", - defaultValue = "", - required = false) + defaultValue = "") private String state; - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - Stream stream = scmClient.listPipelines().stream(); - if (!Strings.isNullOrEmpty(factor)) { - stream = stream.filter( - p -> p.getFactor().toString().compareToIgnoreCase(factor) == 0); - } - if (!Strings.isNullOrEmpty(state)) { - stream = stream.filter(p -> p.getPipelineState().toString() - .compareToIgnoreCase(state) == 0); - } - stream.forEach(System.out::println); - return null; + public void execute(ScmClient scmClient) throws IOException { + Stream stream = scmClient.listPipelines().stream(); + if (!Strings.isNullOrEmpty(factor)) { + stream = stream.filter( + p -> p.getFactor().toString().compareToIgnoreCase(factor) == 0); + } + if (!Strings.isNullOrEmpty(state)) { + stream = stream.filter(p -> p.getPipelineState().toString() + .compareToIgnoreCase(state) 
== 0); } + stream.forEach(System.out::println); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java index d5c0234d01f4..ba7371e6214a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Spec; /** @@ -43,21 +44,20 @@ CreatePipelineSubcommand.class, ClosePipelineSubcommand.class }) -public class PipelineCommands implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class PipelineCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; - @ParentCommand - private WithScmClient parent; - - public WithScmClient getParent() { - return parent; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 64b0aca7006c..c512a9005dd6 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-client - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Client Apache Hadoop Ozone Client jar diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java index 5bae15ddfe11..6c5d1dd909d3 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -54,6 +54,8 @@ public final class BucketArgs { * Bucket encryption key name. */ private String bucketEncryptionKey; + private final String sourceVolume; + private final String sourceBucket; /** * Private constructor, constructed via builder. @@ -62,15 +64,19 @@ public final class BucketArgs { * @param acls list of ACLs. 
* @param metadata map of bucket metadata * @param bucketEncryptionKey bucket encryption key name + * @param sourceVolume + * @param sourceBucket */ private BucketArgs(Boolean versioning, StorageType storageType, - List acls, Map metadata, - String bucketEncryptionKey) { + List acls, Map metadata, + String bucketEncryptionKey, String sourceVolume, String sourceBucket) { this.acls = acls; this.versioning = versioning; this.storageType = storageType; this.metadata = metadata; this.bucketEncryptionKey = bucketEncryptionKey; + this.sourceVolume = sourceVolume; + this.sourceBucket = sourceBucket; } /** @@ -123,6 +129,14 @@ public static BucketArgs.Builder newBuilder() { return new BucketArgs.Builder(); } + public String getSourceVolume() { + return sourceVolume; + } + + public String getSourceBucket() { + return sourceBucket; + } + /** * Builder for OmBucketInfo. */ @@ -132,6 +146,8 @@ public static class Builder { private List acls; private Map metadata; private String bucketEncryptionKey; + private String sourceVolume; + private String sourceBucket; public Builder() { metadata = new HashMap<>(); @@ -161,13 +177,24 @@ public BucketArgs.Builder setBucketEncryptionKey(String bek) { this.bucketEncryptionKey = bek; return this; } + + public BucketArgs.Builder setSourceVolume(String volume) { + sourceVolume = volume; + return this; + } + + public BucketArgs.Builder setSourceBucket(String bucket) { + sourceBucket = bucket; + return this; + } + /** * Constructs the BucketArgs. * @return instance of BucketArgs. */ public BucketArgs build() { return new BucketArgs(versioning, storageType, acls, metadata, - bucketEncryptionKey); + bucketEncryptionKey, sourceVolume, sourceBucket); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index d22b846e1c85..d71e03c9b881 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -109,6 +109,8 @@ public class OzoneBucket extends WithMetadata { private OzoneObj ozoneObj; + private String sourceVolume; + private String sourceBucket; private OzoneBucket(ConfigurationSource conf, String volumeName, String bucketName, ReplicationFactor defaultReplication, @@ -138,11 +140,13 @@ private OzoneBucket(ConfigurationSource conf, String volumeName, .setResType(OzoneObj.ResourceType.BUCKET) .setStoreType(OzoneObj.StoreType.OZONE).build(); } + @SuppressWarnings("parameternumber") public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, String volumeName, String bucketName, StorageType storageType, Boolean versioning, long creationTime, Map metadata, - String encryptionKeyName) { + String encryptionKeyName, + String sourceVolume, String sourceBucket) { this(conf, volumeName, bucketName, null, null, proxy); this.storageType = storageType; this.versioning = versioning; @@ -150,6 +154,8 @@ public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, this.creationTime = Instant.ofEpochMilli(creationTime); this.metadata = metadata; this.encryptionKeyName = encryptionKeyName; + this.sourceVolume = sourceVolume; + this.sourceBucket = sourceBucket; modificationTime = Instant.now(); if (modificationTime.isBefore(this.creationTime)) { modificationTime = Instant.ofEpochSecond( @@ -161,9 +167,10 @@ public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, public OzoneBucket(ConfigurationSource conf, 
ClientProtocol proxy, String volumeName, String bucketName, StorageType storageType, Boolean versioning, long creationTime, long modificationTime, - Map metadata, String encryptionKeyName) { + Map metadata, String encryptionKeyName, + String sourceVolume, String sourceBucket) { this(conf, proxy, volumeName, bucketName, storageType, versioning, - creationTime, metadata, encryptionKeyName); + creationTime, metadata, encryptionKeyName, sourceVolume, sourceBucket); this.modificationTime = Instant.ofEpochMilli(modificationTime); } @@ -306,6 +313,16 @@ public String getEncryptionKeyName() { return encryptionKeyName; } + public String getSourceVolume() { + return sourceVolume; + } + + public String getSourceBucket() { + return sourceBucket; + } + + /** + * Builder for OmBucketInfo. /** * Adds ACLs to the Bucket. * @param addAcl ACL to be added @@ -455,6 +472,16 @@ public void renameKey(String fromKeyName, String toKeyName) proxy.renameKey(volumeName, name, fromKeyName, toKeyName); } + /** + * Rename the key by keyMap, The key is fromKeyName and value is toKeyName. + * @param keyMap The key is original key name nad value is new key name. + * @throws IOException + */ + public void renameKeys(Map keyMap) + throws IOException { + proxy.renameKeys(volumeName, name, keyMap); + } + /** * Initiate multipart upload for a specified key. * @param keyName diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index 2f7b10718784..9bf3973aeaec 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -137,13 +137,17 @@ public static OzoneClient getRpcClient(ConfigurationSource config) // configuration, we don't fall back to default ozone.om.address defined // in ozone-default.xml. - if (OmUtils.isServiceIdsDefined(config)) { + String[] serviceIds = config.getTrimmedStrings(OZONE_OM_SERVICE_IDS_KEY); + if (serviceIds.length > 1) { throw new IOException("Following ServiceID's " + config.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY) + " are" + " defined in the configuration. 
Use the method getRpcClient which " + "takes serviceID and configuration as param"); + } else if (serviceIds.length == 1) { + return getRpcClient(getClientProtocol(config, serviceIds[0]), config); + } else { + return getRpcClient(getClientProtocol(config), config); } - return getRpcClient(getClientProtocol(config), config); } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java index 3cab66465009..712d1199a335 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java @@ -172,10 +172,8 @@ public void addPreallocateBlocks(OmKeyLocationInfoGroup version, // server may return any number of blocks, (0 to any) // only the blocks allocated in this open session (block createVersion // equals to open session version) - for (OmKeyLocationInfo subKeyInfo : version.getLocationList()) { - if (subKeyInfo.getCreateVersion() == openVersion) { - addKeyLocationInfo(subKeyInfo); - } + for (OmKeyLocationInfo subKeyInfo : version.getLocationList(openVersion)) { + addKeyLocationInfo(subKeyInfo); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index 4af683827240..769035a5e5d0 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -20,7 +20,6 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.hdds.client.BlockID; @@ -35,7 +34,6 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import java.io.OutputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -325,62 +323,14 @@ public long getRemainingOfIndex(int index) throws IOException { return blockStreams.get(index).getRemaining(); } - /** - * Copies some or all bytes from a large (over 2GB) InputStream - * to an OutputStream, optionally skipping input bytes. - *

- * Copy the method from IOUtils of commons-io to reimplement skip by seek - * rather than read. The reason why IOUtils of commons-io implement skip - * by read can be found at - * IO-203. - *
- *
- * This method uses the provided buffer, so there is no need to use a - * BufferedInputStream. - *
- * - * @param output the OutputStream to write to - * @param inputOffset : number of bytes to skip from input before copying - * -ve values are ignored - * @param length : number of bytes to copy. -ve means all - * @param buffer the buffer to use for the copy - * @return the number of bytes copied - * @throws NullPointerException if the input or output is null - * @throws IOException if an I/O error occurs - */ - public long copyLarge(final OutputStream output, - final long inputOffset, final long len, final byte[] buffer) - throws IOException { - if (inputOffset > 0) { - seek(inputOffset); - } - - if (len == 0) { + @Override + public long skip(long n) throws IOException { + if (n <= 0) { return 0; } - final int bufferLength = buffer.length; - int bytesToRead = bufferLength; - if (len > 0 && len < bufferLength) { - bytesToRead = (int) len; - } - - int read; - long totalRead = 0; - while (bytesToRead > 0) { - read = read(buffer, 0, bytesToRead); - if (read == IOUtils.EOF) { - break; - } - - output.write(buffer, 0, read); - totalRead += read; - if (len > 0) { // only adjust len if not reading to the end - // Note the cast must work because buffer.length is an integer - bytesToRead = (int) Math.min(len - totalRead, bufferLength); - } - } - - return totalRead; + long toSkip = Math.min(n, length - getPos()); + seek(getPos() + toSkip); + return toSkip; } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java index a69740f07952..14b28665bdb1 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java @@ -57,6 +57,11 @@ public int available() throws IOException { return inputStream.available(); } + @Override + public long skip(long n) throws IOException { + return inputStream.skip(n); + } + public InputStream getInputStream() { return inputStream; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 9c662efbf000..1b8d93ac7258 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -314,7 +314,17 @@ void deleteKeys(String volumeName, String bucketName, * @throws IOException */ void renameKey(String volumeName, String bucketName, String fromKeyName, - String toKeyName) throws IOException; + String toKeyName) throws IOException; + + /** + * Renames existing keys within a bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param keyMap The key is original key name nad value is new key name. 
+ * @throws IOException + */ + void renameKeys(String volumeName, String bucketName, + Map keyMap) throws IOException; /** * Returns list of Keys in {Volume/Bucket} that matches the keyPrefix, diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 56c867d45ea0..d72d930e54f3 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -76,6 +76,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; @@ -84,6 +85,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmPartInfo; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; @@ -447,6 +449,8 @@ public void createBucket( .setIsVersionEnabled(isVersionEnabled) .addAllMetadata(bucketArgs.getMetadata()) .setStorageType(storageType) + .setSourceVolume(bucketArgs.getSourceVolume()) + .setSourceBucket(bucketArgs.getSourceBucket()) .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())); if (bek != null) { @@ -613,7 +617,10 @@ public OzoneBucket getBucketDetails( bucketInfo.getModificationTime(), bucketInfo.getMetadata(), bucketInfo.getEncryptionKeyInfo() != null ? bucketInfo - .getEncryptionKeyInfo().getKeyName() : null); + .getEncryptionKeyInfo().getKeyName() : null, + bucketInfo.getSourceVolume(), + bucketInfo.getSourceBucket() + ); } @Override @@ -634,7 +641,9 @@ public List listBuckets(String volumeName, String bucketPrefix, bucket.getModificationTime(), bucket.getMetadata(), bucket.getEncryptionKeyInfo() != null ? 
bucket - .getEncryptionKeyInfo().getKeyName() : null)) + .getEncryptionKeyInfo().getKeyName() : null, + bucket.getSourceVolume(), + bucket.getSourceBucket())) .collect(Collectors.toList()); } @@ -730,16 +739,9 @@ public void deleteKeys( throws IOException { HddsClientUtils.verifyResourceName(volumeName, bucketName); Preconditions.checkNotNull(keyNameList); - List keyArgsList = new ArrayList<>(); - for (String keyName: keyNameList) { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build(); - keyArgsList.add(keyArgs); - } - ozoneManagerClient.deleteKeys(keyArgsList); + OmDeleteKeys omDeleteKeys = new OmDeleteKeys(volumeName, bucketName, + keyNameList); + ozoneManagerClient.deleteKeys(omDeleteKeys); } @Override @@ -759,6 +761,18 @@ public void renameKey(String volumeName, String bucketName, ozoneManagerClient.renameKey(keyArgs, toKeyName); } + @Override + public void renameKeys(String volumeName, String bucketName, + Map keyMap) throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + HddsClientUtils.checkNotNull(keyMap); + OmRenameKeys omRenameKeys = + new OmRenameKeys(volumeName, bucketName, keyMap, null); + ozoneManagerClient.renameKeys(omRenameKeys); + } + + @Override public List listKeys(String volumeName, String bucketName, String keyPrefix, String prevKey, @@ -1016,6 +1030,7 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, .setBucketName(bucketName) .setKeyName(keyName) .setRefreshPipeline(true) + .setSortDatanodesInPipeline(topologyAwareReadEnabled) .build(); return ozoneManagerClient.getFileStatus(keyArgs); } @@ -1098,6 +1113,7 @@ public List listStatus(String volumeName, String bucketName, .setBucketName(bucketName) .setKeyName(keyName) .setRefreshPipeline(true) + .setSortDatanodesInPipeline(topologyAwareReadEnabled) .build(); return ozoneManagerClient .listStatus(keyArgs, recursive, startKey, numEntries); diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 754351b46315..78eb2e71fa4b 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-common - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Common Apache Hadoop Ozone Common jar diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 6eb8b18b1ee4..93e0e7f7dec0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -34,6 +34,7 @@ import java.util.OptionalInt; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.conf.OMClientConfig; @@ -54,6 +55,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY; import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; @@ -244,6 +246,7 @@ public static boolean isReadOnly( case DeleteBucket: case CreateKey: case RenameKey: + case RenameKeys: case DeleteKey: case DeleteKeys: case CommitKey: @@ -527,4 +530,49 @@ public static void validateKeyName(String keyName) OMException.ResultCodes.INVALID_KEY_NAME); } } + + /** + * Return configured OzoneManager service id based on the following logic. + * Look at 'ozone.om.internal.service.id' first. If configured, return that. + * If the above is not configured, look at 'ozone.om.service.ids'. + * If count(ozone.om.service.ids) == 1, return that id. + * If count(ozone.om.service.ids) > 1 throw exception + * If 'ozone.om.service.ids' is not configured, return null. (Non HA) + * @param conf configuration + * @return OM service ID. + * @throws IOException on error. + */ + public static String getOzoneManagerServiceId(OzoneConfiguration conf) + throws IOException { + String localOMServiceId = conf.get(OZONE_OM_INTERNAL_SERVICE_ID); + Collection omServiceIds = conf.getTrimmedStringCollection( + OZONE_OM_SERVICE_IDS_KEY); + if (localOMServiceId == null) { + LOG.info("{} is not defined, falling back to {} to find serviceID for " + + "OzoneManager if it is HA enabled cluster", + OZONE_OM_INTERNAL_SERVICE_ID, OZONE_OM_SERVICE_IDS_KEY); + if (omServiceIds.size() > 1) { + throw new IOException(String.format( + "More than 1 OzoneManager ServiceID (%s) " + + "configured : %s, but %s is not " + + "configured.", OZONE_OM_SERVICE_IDS_KEY, + omServiceIds.toString(), OZONE_OM_INTERNAL_SERVICE_ID)); + } + } else if (!omServiceIds.contains(localOMServiceId)) { + throw new IOException(String.format( + "Cannot find the internal service id %s in %s", + localOMServiceId, omServiceIds.toString())); + } else { + omServiceIds = Collections.singletonList(localOMServiceId); + } + + if (omServiceIds.isEmpty()) { + LOG.info("No OzoneManager ServiceID configured."); + return null; + } else { + String serviceId = omServiceIds.iterator().next(); + LOG.info("Using OzoneManager ServiceID '{}'.", serviceId); + return serviceId; + } + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index cd8b12614d79..3480063d1323 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -32,16 +32,14 @@ public enum OMAction implements AuditAction { DELETE_BUCKET, DELETE_KEY, RENAME_KEY, + RENAME_KEYS, SET_OWNER, SET_QUOTA, UPDATE_VOLUME, UPDATE_BUCKET, UPDATE_KEY, PURGE_KEYS, - - // S3 Bucket - CREATE_S3_BUCKET, - DELETE_S3_BUCKET, + DELETE_KEYS, // READ Actions CHECK_VOLUME_ACCESS, @@ -52,7 +50,6 @@ public enum OMAction implements AuditAction { READ_VOLUME, READ_BUCKET, READ_KEY, - LIST_S3BUCKETS, INITIATE_MULTIPART_UPLOAD, COMMIT_MULTIPART_UPLOAD_PARTKEY, COMPLETE_MULTIPART_UPLOAD, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 4f512a55032d..f16679a681eb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -239,4 +239,11 @@ private OMConfigKeys() { 
"ozone.om.keyname.character.check.enabled"; public static final boolean OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT = false; + + // This config needs to be enabled, when S3G created objects used via + // FileSystem API. + public static final String OZONE_OM_ENABLE_FILESYSTEM_PATHS = + "ozone.om.enable.filesystem.paths"; + public static final boolean OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT = + false; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index 58d5a02e1f30..e08dccb6a5a1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -223,6 +223,13 @@ public enum ResultCodes { INVALID_VOLUME_NAME, - REPLAY // When ratis logs are replayed. + PARTIAL_DELETE, + + DETECTED_LOOP_IN_BUCKET_LINKS, + + NOT_SUPPORTED_OPERATION, + + PARTIAL_RENAME + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java deleted file mode 100644 index 0eeb873a58f8..000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.exceptions; - -import java.io.IOException; - -/** - * Exception thrown by Ozone Manager when a transaction is replayed. This - * exception should not be thrown to client. It is used in - * OMClientRequest#validateAndUpdateCache to log error and continue in case - * of replay transaction. - */ -public class OMReplayException extends IOException { - - private final boolean needsDBOperations; - - public OMReplayException() { - this(false); - } - - /** - * When the transaction is a replay but still needs some DB operations to - * be performed (such as cleanup of old keys). - * @param needsDBOperations - */ - public OMReplayException(boolean needsDBOperations) { - // Dummy message. This exception is not thrown to client. - super("Replayed transaction"); - this.needsDBOperations = needsDBOperations; - } - - public boolean isDBOperationNeeded() { - return needsDBOperations; - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java index 3b2692de7ba9..1abe5abfdb1b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java @@ -37,6 +37,8 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.FailoverProxyProvider; import org.apache.hadoop.io.retry.RetryInvocationHandler; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; @@ -172,9 +174,17 @@ private OzoneManagerProtocolPB createOMProxy(InetSocketAddress omAddress) LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); RPC.setProtocolEngine(hadoopConf, OzoneManagerProtocolPB.class, ProtobufRpcEngine.class); - return RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress, ugi, - hadoopConf, NetUtils.getDefaultSocketFactory(hadoopConf), - (int) OmUtils.getOMClientRpcTimeOut(conf)); + + // FailoverOnNetworkException ensures that the IPC layer does not attempt + // retries on the same OM in case of connection exception. This retry + // policy essentially results in TRY_ONCE_THEN_FAIL. 
+ RetryPolicy connectionRetryPolicy = RetryPolicies + .failoverOnNetworkException(0); + + return RPC.getProtocolProxy(OzoneManagerProtocolPB.class, omVersion, + omAddress, ugi, hadoopConf, NetUtils.getDefaultSocketFactory( + hadoopConf), (int) OmUtils.getOMClientRpcTimeOut(conf), + connectionRetryPolicy).getProxy(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java index e1ae0bbfbd86..c1801388bfe7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java @@ -49,6 +49,10 @@ public CryptoProtocolVersion getVersion() { return version; } + public BucketEncryptionKeyInfo copy() { + return new BucketEncryptionKeyInfo(version, suite, keyName); + } + /** * Builder for BucketEncryptionKeyInfo. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index e9a8cbcd6fb0..abbe3955f6b1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -76,6 +76,10 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { */ private BucketEncryptionKeyInfo bekInfo; + private final String sourceVolume; + + private final String sourceBucket; + /** * Private constructor, constructed via builder. * @param volumeName - Volume name. @@ -87,19 +91,23 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { * @param modificationTime - Bucket modification time. * @param metadata - metadata. * @param bekInfo - bucket encryption key info. + * @param sourceVolume - source volume for bucket links, null otherwise + * @param sourceBucket - source bucket for bucket links, null otherwise */ @SuppressWarnings("checkstyle:ParameterNumber") private OmBucketInfo(String volumeName, - String bucketName, - List acls, - boolean isVersionEnabled, - StorageType storageType, - long creationTime, - long modificationTime, - long objectID, - long updateID, - Map metadata, - BucketEncryptionKeyInfo bekInfo) { + String bucketName, + List acls, + boolean isVersionEnabled, + StorageType storageType, + long creationTime, + long modificationTime, + long objectID, + long updateID, + Map metadata, + BucketEncryptionKeyInfo bekInfo, + String sourceVolume, + String sourceBucket) { this.volumeName = volumeName; this.bucketName = bucketName; this.acls = acls; @@ -111,6 +119,8 @@ private OmBucketInfo(String volumeName, this.updateID = updateID; this.metadata = metadata; this.bekInfo = bekInfo; + this.sourceVolume = sourceVolume; + this.sourceBucket = sourceBucket; } /** @@ -208,6 +218,18 @@ public BucketEncryptionKeyInfo getEncryptionKeyInfo() { return bekInfo; } + public String getSourceVolume() { + return sourceVolume; + } + + public String getSourceBucket() { + return sourceBucket; + } + + public boolean isLink() { + return sourceVolume != null && sourceBucket != null; + } + /** * Returns new builder class that builds a OmBucketInfo. * @@ -235,6 +257,10 @@ public Map toAuditMap() { (bekInfo != null) ? 
bekInfo.getKeyName() : null); auditMap.put(OzoneConsts.MODIFICATION_TIME, String.valueOf(this.modificationTime)); + if (isLink()) { + auditMap.put(OzoneConsts.SOURCE_VOLUME, sourceVolume); + auditMap.put(OzoneConsts.SOURCE_BUCKET, sourceBucket); + } return auditMap; } @@ -242,7 +268,22 @@ public Map toAuditMap() { * Return a new copy of the object. */ public OmBucketInfo copyObject() { - OmBucketInfo.Builder builder = new OmBucketInfo.Builder() + Builder builder = toBuilder(); + + if (bekInfo != null) { + builder.setBucketEncryptionKey(bekInfo.copy()); + } + + builder.acls.clear(); + acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), + acl.getName(), (BitSet) acl.getAclBitSet().clone(), + acl.getAclScope()))); + + return builder.build(); + } + + public Builder toBuilder() { + return new Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setStorageType(storageType) @@ -251,19 +292,11 @@ public OmBucketInfo copyObject() { .setModificationTime(modificationTime) .setObjectID(objectID) .setUpdateID(updateID) - .setBucketEncryptionKey(bekInfo != null ? - new BucketEncryptionKeyInfo(bekInfo.getVersion(), - bekInfo.getSuite(), bekInfo.getKeyName()) : null); - - acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); - - if (metadata != null) { - metadata.forEach((k, v) -> builder.addMetadata(k, v)); - } - return builder.build(); - + .setBucketEncryptionKey(bekInfo) + .setSourceVolume(sourceVolume) + .setSourceBucket(sourceBucket) + .setAcls(acls) + .addAllMetadata(metadata); } /** @@ -281,6 +314,8 @@ public static class Builder { private long updateID; private Map metadata; private BucketEncryptionKeyInfo bekInfo; + private String sourceVolume; + private String sourceBucket; public Builder() { //Default values @@ -362,6 +397,16 @@ public Builder setBucketEncryptionKey( return this; } + public Builder setSourceVolume(String volume) { + this.sourceVolume = volume; + return this; + } + + public Builder setSourceBucket(String bucket) { + this.sourceBucket = bucket; + return this; + } + /** * Constructs the OmBucketInfo. * @return instance of OmBucketInfo. @@ -375,7 +420,7 @@ public OmBucketInfo build() { return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled, storageType, creationTime, modificationTime, objectID, updateID, - metadata, bekInfo); + metadata, bekInfo, sourceVolume, sourceBucket); } } @@ -397,6 +442,12 @@ public BucketInfo getProtobuf() { if (bekInfo != null && bekInfo.getKeyName() != null) { bib.setBeinfo(OMPBHelper.convert(bekInfo)); } + if (sourceVolume != null) { + bib.setSourceVolume(sourceVolume); + } + if (sourceBucket != null) { + bib.setSourceBucket(sourceBucket); + } return bib.build(); } @@ -428,17 +479,28 @@ public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { if (bucketInfo.hasBeinfo()) { obib.setBucketEncryptionKey(OMPBHelper.convert(bucketInfo.getBeinfo())); } + if (bucketInfo.hasSourceVolume()) { + obib.setSourceVolume(bucketInfo.getSourceVolume()); + } + if (bucketInfo.hasSourceBucket()) { + obib.setSourceBucket(bucketInfo.getSourceBucket()); + } return obib.build(); } @Override public String getObjectInfo() { + String sourceInfo = sourceVolume != null && sourceBucket != null + ? 
", source='" + sourceVolume + "/" + sourceBucket + "'" + : ""; + return "OMBucketInfo{" + - "volume='" + volumeName + '\'' + - ", bucket='" + bucketName + '\'' + - ", isVersionEnabled='" + isVersionEnabled + '\'' + - ", storageType='" + storageType + '\'' + - ", creationTime='" + creationTime + '\'' + + "volume='" + volumeName + "'" + + ", bucket='" + bucketName + "'" + + ", isVersionEnabled='" + isVersionEnabled + "'" + + ", storageType='" + storageType + "'" + + ", creationTime='" + creationTime + "'" + + sourceInfo + '}'; } @@ -460,6 +522,8 @@ public boolean equals(Object o) { storageType == that.storageType && objectID == that.objectID && updateID == that.updateID && + Objects.equals(sourceVolume, that.sourceVolume) && + Objects.equals(sourceBucket, that.sourceBucket) && Objects.equals(metadata, that.metadata) && Objects.equals(bekInfo, that.bekInfo); } @@ -468,4 +532,22 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(volumeName, bucketName); } + + @Override + public String toString() { + return "OmBucketInfo{" + + "volumeName='" + volumeName + "'" + + ", bucketName='" + bucketName + "'" + + ", acls=" + acls + + ", isVersionEnabled=" + isVersionEnabled + + ", storageType=" + storageType + + ", creationTime=" + creationTime + + ", bekInfo=" + bekInfo + + ", sourceVolume='" + sourceVolume + "'" + + ", sourceBucket='" + sourceBucket + "'" + + ", objectID=" + objectID + + ", updateID=" + updateID + + ", metadata=" + metadata + + '}'; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteKeys.java new file mode 100644 index 000000000000..4274078f24d5 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDeleteKeys.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import java.util.List; + +/** + * Represent class which has info of Keys to be deleted from Client. 
+ */ +public class OmDeleteKeys { + + private String volume; + private String bucket; + + private List keyNames; + + + public OmDeleteKeys(String volume, String bucket, List keyNames) { + this.volume = volume; + this.bucket = bucket; + this.keyNames = keyNames; + } + + public String getVolume() { + return volume; + } + + public String getBucket() { + return bucket; + } + + public List< String > getKeyNames() { + return keyNames; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index 2a882a43a926..c08c988fc7e3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -162,6 +162,24 @@ public void addLocationInfo(OmKeyLocationInfo locationInfo) { locationInfoList.add(locationInfo); } + public OmKeyArgs.Builder toBuilder() { + return new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setDataSize(dataSize) + .setType(type) + .setFactor(factor) + .setLocationInfoList(locationInfoList) + .setIsMultipartKey(isMultipartKey) + .setMultipartUploadID(multipartUploadID) + .setMultipartUploadPartNumber(multipartUploadPartNumber) + .addAllMetadata(metadata) + .setRefreshPipeline(refreshPipeline) + .setSortDatanodesInPipeline(sortDatanodesInPipeline) + .setAcls(acls); + } + /** * Builder class of OmKeyArgs. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 783089ec1c7a..d0e8bee52345 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -148,21 +148,17 @@ public void updateModifcationTime() { public void updateLocationInfoList(List locationInfoList) { long latestVersion = getLatestVersionLocations().getVersion(); OmKeyLocationInfoGroup keyLocationInfoGroup = getLatestVersionLocations(); - List currentList = - keyLocationInfoGroup.getLocationList(); - List latestVersionList = - keyLocationInfoGroup.getBlocksLatestVersionOnly(); // Updates the latest locationList in the latest version only with // given locationInfoList here. // TODO : The original allocated list and the updated list here may vary // as the containers on the Datanode on which the blocks were pre allocated // might get closed. The diff of blocks between these two lists here // need to be garbage collected in case the ozone client dies. - currentList.removeAll(latestVersionList); + keyLocationInfoGroup.removeBlocks(latestVersion); // set each of the locationInfo object to the latest version - locationInfoList.stream().forEach(omKeyLocationInfo -> omKeyLocationInfo + locationInfoList.forEach(omKeyLocationInfo -> omKeyLocationInfo .setCreateVersion(latestVersion)); - currentList.addAll(locationInfoList); + keyLocationInfoGroup.addAll(latestVersion, locationInfoList); } /** @@ -381,13 +377,26 @@ public OmKeyInfo build() { } } + /** + * For network transmit. + * @return + */ public KeyInfo getProtobuf() { + return getProtobuf(false); + } + + /** + * + * @param ignorePipeline true for persist to DB, false for network transmit. + * @return + */ + public KeyInfo getProtobuf(boolean ignorePipeline) { long latestVersion = keyLocationVersions.size() == 0 ? 
-1 : keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion(); List keyLocations = new ArrayList<>(); for (OmKeyLocationInfoGroup locationInfoGroup : keyLocationVersions) { - keyLocations.add(locationInfoGroup.getProtobuf()); + keyLocations.add(locationInfoGroup.getProtobuf(ignorePipeline)); } KeyInfo.Builder kb = KeyInfo.newBuilder() @@ -397,8 +406,8 @@ public KeyInfo getProtobuf() { .setDataSize(dataSize) .setFactor(factor) .setType(type) - .addAllKeyLocationList(keyLocations) .setLatestVersion(latestVersion) + .addAllKeyLocationList(keyLocations) .setCreationTime(creationTime) .setModificationTime(modificationTime) .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) @@ -505,12 +514,10 @@ public OmKeyInfo copyObject() { .setObjectID(objectID).setUpdateID(updateID); - keyLocationVersions.forEach(keyLocationVersion -> { - List keyLocationInfos = new ArrayList<>(); - keyLocationInfos.addAll(keyLocationVersion.getLocationList()); - builder.addOmKeyLocationInfoGroup(new OmKeyLocationInfoGroup( - keyLocationVersion.getVersion(), keyLocationInfos)); - }); + keyLocationVersions.forEach(keyLocationVersion -> + builder.addOmKeyLocationInfoGroup( + new OmKeyLocationInfoGroup(keyLocationVersion.getVersion(), + keyLocationVersion.getLocationList()))); acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), acl.getName(), (BitSet) acl.getAclBitSet().clone(), diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java index b9a292069135..70c71d6d7f32 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java @@ -156,7 +156,15 @@ public OmKeyLocationInfo build() { } } + public KeyLocation getCompactProtobuf() { + return getProtobuf(true); + } + public KeyLocation getProtobuf() { + return getProtobuf(false); + } + + private KeyLocation getProtobuf(boolean ignorePipeline) { KeyLocation.Builder builder = KeyLocation.newBuilder() .setBlockID(blockID.getProtobuf()) .setLength(length) @@ -165,10 +173,12 @@ public KeyLocation getProtobuf() { if (this.token != null) { builder.setToken(OzonePBHelper.protoFromToken(token)); } - try { - builder.setPipeline(pipeline.getProtobufMessage()); - } catch (UnknownPipelineStateException e) { - //TODO: fix me: we should not return KeyLocation without pipeline. + if (!ignorePipeline) { + try { + builder.setPipeline(pipeline.getProtobufMessage()); + } catch (UnknownPipelineStateException e) { + //TODO: fix me: we should not return KeyLocation without pipeline. 
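+        // Note: when ignorePipeline is true (the compact form produced by
+        // getCompactProtobuf for persisting to the OM DB) the pipeline is
+        // skipped altogether, so this fallback only affects the full form
+        // sent over the wire.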
+ } } return builder.build(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java index bd97757cfd04..abaf055f9f2d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java @@ -16,12 +16,13 @@ */ package org.apache.hadoop.ozone.om.helpers; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList; -import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; /** @@ -30,12 +31,23 @@ */ public class OmKeyLocationInfoGroup { private final long version; - private final List locationList; + private final Map> locationVersionMap; public OmKeyLocationInfoGroup(long version, List locations) { this.version = version; - this.locationList = locations; + this.locationVersionMap = locations.stream() + .collect(Collectors.groupingBy(OmKeyLocationInfo::getCreateVersion)); + //prevent NPE + this.locationVersionMap.putIfAbsent(version, new ArrayList<>()); + } + + public OmKeyLocationInfoGroup(long version, + Map> locations) { + this.version = version; + this.locationVersionMap = locations; + //prevent NPE + this.locationVersionMap.putIfAbsent(version, new ArrayList<>()); } /** @@ -44,10 +56,7 @@ public OmKeyLocationInfoGroup(long version, * @return the list of blocks that are created in the latest version. */ public List getBlocksLatestVersionOnly() { - List list = new ArrayList<>(); - locationList.stream().filter(x -> x.getCreateVersion() == version) - .forEach(list::add); - return list; + return new ArrayList<>(locationVersionMap.get(version)); } public long getVersion() { @@ -55,28 +64,40 @@ public long getVersion() { } public List getLocationList() { - return locationList; + return locationVersionMap.values().stream().flatMap(List::stream) + .collect(Collectors.toList()); } - public KeyLocationList getProtobuf() { - return KeyLocationList.newBuilder() - .setVersion(version) - .addAllKeyLocations( - locationList.stream().map(OmKeyLocationInfo::getProtobuf) - .collect(Collectors.toList())) - .build(); + public long getLocationListCount() { + return locationVersionMap.values().stream().mapToLong(List::size).sum(); } - public static OmKeyLocationInfoGroup getFromProtobuf( - KeyLocationList keyLocationList) { - List locations = new ArrayList<>(); - for (KeyLocation keyLocation : keyLocationList - .getKeyLocationsList()) { - locations.add(OmKeyLocationInfo.getFromProtobuf(keyLocation)); + public List getLocationList(Long versionToFetch) { + return new ArrayList<>(locationVersionMap.get(versionToFetch)); + } + + public KeyLocationList getProtobuf(boolean ignorePipeline) { + KeyLocationList.Builder builder = KeyLocationList.newBuilder() + .setVersion(version); + List keyLocationList = + new ArrayList<>(); + for (List locationList : locationVersionMap.values()) { + for (OmKeyLocationInfo keyInfo : locationList) { + keyLocationList.add(ignorePipeline ? 
+ keyInfo.getCompactProtobuf() : keyInfo.getProtobuf()); + } } + return builder.addAllKeyLocations(keyLocationList).build(); + } + public static OmKeyLocationInfoGroup getFromProtobuf( + KeyLocationList keyLocationList) { return new OmKeyLocationInfoGroup( - keyLocationList.getVersion(), locations); + keyLocationList.getVersion(), + keyLocationList.getKeyLocationsList().stream() + .map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.groupingBy(OmKeyLocationInfo::getCreateVersion)) + ); } /** @@ -84,38 +105,42 @@ public static OmKeyLocationInfoGroup getFromProtobuf( * one. * * @param newLocationList a list of new location to be added. - * @return + * @return newly generated OmKeyLocationInfoGroup */ OmKeyLocationInfoGroup generateNextVersion( - List newLocationList) throws IOException { - // TODO : revisit if we can do this method more efficiently - // one potential inefficiency here is that later version always include - // older ones. e.g. v1 has B1, then v2, v3...will all have B1 and only add - // more - List newList = new ArrayList<>(); - newList.addAll(locationList); - for (OmKeyLocationInfo newInfo : newLocationList) { - // all these new blocks will have addVersion of current version + 1 - newInfo.setCreateVersion(version + 1); - newList.add(newInfo); - } - return new OmKeyLocationInfoGroup(version + 1, newList); + List newLocationList) { + Map> newMap = + new HashMap<>(locationVersionMap); + newMap.put(version + 1, new ArrayList<>(newLocationList)); + return new OmKeyLocationInfoGroup(version + 1, newMap); } - void appendNewBlocks(List newLocationList) - throws IOException { + void appendNewBlocks(List newLocationList) { + List locationList = locationVersionMap.get(version); for (OmKeyLocationInfo info : newLocationList) { info.setCreateVersion(version); locationList.add(info); } } + void removeBlocks(long versionToRemove){ + locationVersionMap.remove(versionToRemove); + } + + void addAll(long versionToAdd, List locationInfoList) { + locationVersionMap.putIfAbsent(versionToAdd, new ArrayList<>()); + List list = locationVersionMap.get(versionToAdd); + list.addAll(locationInfoList); + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("version:").append(version).append(" "); - for (OmKeyLocationInfo kli : locationList) { - sb.append(kli.getLocalID()).append(" || "); + for (List kliList : locationVersionMap.values()) { + for(OmKeyLocationInfo kli: kliList) { + sb.append(kli.getLocalID()).append(" || "); + } } return sb.toString(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRenameKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRenameKeys.java new file mode 100644 index 000000000000..d550817b6a25 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRenameKeys.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import java.util.HashMap; +import java.util.Map; + +/** + * This class is used for rename keys. + */ +public class OmRenameKeys { + + private String volume; + private String bucket; + private Map fromAndToKey = new HashMap<>(); + private Map fromKeyAndToKeyInfo = new HashMap<>(); + + public OmRenameKeys(String volume, String bucket, + Map fromAndToKey, + Map fromKeyAndToKeyInfo) { + this.volume = volume; + this.bucket = bucket; + this.fromAndToKey = fromAndToKey; + this.fromKeyAndToKeyInfo = fromKeyAndToKeyInfo; + } + + public String getVolume() { + return volume; + } + + public String getBucket() { + return bucket; + } + + public Map getFromAndToKey() { + return fromAndToKey; + } + + public Map getFromKeyAndToKeyInfo() { + return fromKeyAndToKeyInfo; + } + +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index 07f3194c14b2..d1491ed6c506 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.helpers; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; import java.nio.file.Paths; @@ -86,4 +87,33 @@ public static String addTrailingSlashIfNeeded(String key) { public static boolean isFile(String keyName) { return !keyName.endsWith(OZONE_URI_DELIMITER); } + + /** + * Whether the pathname is valid. Currently prohibits relative paths, + * names which contain a ":" or "//", or other non-canonical paths. + */ + public static boolean isValidName(String src) { + // Path must be absolute. + if (!src.startsWith(Path.SEPARATOR)) { + return false; + } + + // Check for ".." "." ":" "/" + String[] components = StringUtils.split(src, '/'); + for (int i = 0; i < components.length; i++) { + String element = components[i]; + if (element.equals(".") || + (element.contains(":")) || + (element.contains("/") || element.equals(".."))) { + return false; + } + // The string may start or end with a /, but not have + // "//" in the middle. 
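+      // e.g. "/a//b" yields an empty interior component and is rejected,
+      // while the empty component produced by a leading or trailing "/"
+      // is accepted.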
+ if (element.isEmpty() && i != components.length - 1 && + i != 0) { + return false; + } + } + return true; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java index 2ff69c3a5ab2..ca861d3a4b42 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java @@ -130,4 +130,22 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(getTrimmedName()); } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()); + sb.append("{"); + if (keyInfo == null) { + sb.append(""); + } else { + sb.append(getTrimmedName()); + if (isDirectory) { + sb.append(" (dir)"); + } + } + sb.append("}"); + return sb.toString(); + } + } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index c28c2c8abc47..6d7bf2f83ff5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -60,10 +60,15 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build(); } - public RepeatedKeyInfo getProto() { + /** + * + * @param compact, true for persistence, false for network transmit + * @return + */ + public RepeatedKeyInfo getProto(boolean compact) { List list = new ArrayList<>(); for(OmKeyInfo k : omKeyInfoList) { - list.add(k.getProtobuf()); + list.add(k.getProtobuf(compact)); } RepeatedKeyInfo.Builder builder = RepeatedKeyInfo.newBuilder() diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index b342ef21b890..267ac89be03f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.DBUpdates; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -38,6 +39,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -216,6 +218,14 @@ OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, */ void renameKey(OmKeyArgs args, String toKeyName) throws IOException; + /** + * Rename existing keys within a bucket. 
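+   * The whole batch is submitted to the OM as a single RenameKeys request.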
+ * @param omRenameKeys Includes volume, bucket, and fromKey toKey name map + * and fromKey name toKey info Map. + * @throws IOException + */ + void renameKeys(OmRenameKeys omRenameKeys) throws IOException; + /** * Deletes an existing key. * @@ -229,10 +239,10 @@ OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, * multiple keys and a single key. Used by deleting files * through OzoneFileSystem. * - * @param args the list args of the key. + * @param deleteKeys * @throws IOException */ - void deleteKeys(List args) throws IOException; + void deleteKeys(OmDeleteKeys deleteKeys) throws IOException; /** * Deletes an existing empty bucket from volume. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index ae2c622be6db..506d84cbca19 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -21,6 +21,7 @@ import java.time.Instant; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import org.apache.hadoop.hdds.annotation.InterfaceAudience; @@ -33,6 +34,7 @@ import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -43,6 +45,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -71,8 +74,9 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclResponse; @@ -119,7 +123,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; @@ -141,6 +148,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.protobuf.ByteString; + import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.ACCESS_DENIED; @@ -672,6 +680,33 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); } + @Override + public void renameKeys(OmRenameKeys omRenameKeys) throws IOException { + + List renameKeyList = new ArrayList<>(); + for (Map.Entry< String, String> entry : + omRenameKeys.getFromAndToKey().entrySet()) { + RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() + .setFromKeyName(entry.getKey()) + .setToKeyName(entry.getValue()); + renameKeyList.add(renameKey.build()); + } + + RenameKeysArgs.Builder renameKeyArgs = RenameKeysArgs.newBuilder() + .setVolumeName(omRenameKeys.getVolume()) + .setBucketName(omRenameKeys.getBucket()) + .addAllRenameKeysMap(renameKeyList); + + RenameKeysRequest.Builder reqKeys = RenameKeysRequest.newBuilder() + .setRenameKeysArgs(renameKeyArgs.build()); + + OMRequest omRequest = createOMRequest(Type.RenameKeys) + .setRenameKeysRequest(reqKeys.build()) + .build(); + + handleError(submitRequest(omRequest)); + } + @Override public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder(); @@ -717,22 +752,17 @@ public void deleteKey(OmKeyArgs args) throws IOException { * Deletes existing key/keys. This interface supports delete * multiple keys and a single key. * - * @param args the list args of the key. 
+ * @param deleteKeys * @throws IOException */ @Override - public void deleteKeys(List args) throws IOException { + public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException { DeleteKeysRequest.Builder req = DeleteKeysRequest.newBuilder(); - List keyArgsList = new ArrayList(); - for (OmKeyArgs omKeyArgs : args) { - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(omKeyArgs.getVolumeName()) - .setBucketName(omKeyArgs.getBucketName()) - .setKeyName(omKeyArgs.getKeyName()).build(); - keyArgsList.add(keyArgs); - } - req.addAllKeyArgs(keyArgsList); - + DeleteKeyArgs deletedKeys = DeleteKeyArgs.newBuilder() + .setBucketName(deleteKeys.getBucket()) + .setVolumeName(deleteKeys.getVolume()) + .addAllKeys(deleteKeys.getKeyNames()).build(); + req.setDeleteKeys(deletedKeys); OMRequest omRequest = createOMRequest(Type.DeleteKeys) .setDeleteKeysRequest(req) .build(); @@ -1147,6 +1177,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) + .setSortDatanodes(args.getSortDatanodes()) .build(); GetFileStatusRequest req = GetFileStatusRequest.newBuilder() @@ -1360,6 +1391,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) + .setSortDatanodes(args.getSortDatanodes()) .build(); ListStatusRequest listStatusRequest = ListStatusRequest.newBuilder() diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java index 290dd1d4f95f..c0b1ddbd1dd9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java @@ -26,13 +26,17 @@ import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3AUTHINFO; + /** * The token identifier for Ozone Master. */ @@ -77,6 +81,55 @@ public Text getKind() { return KIND_NAME; } + /** Instead of relying on proto serialization, this + * provides explicit serialization for OzoneTokenIdentifier. + * @return byte[] + */ + public byte[] toUniqueSerializedKey() { + DataOutputBuffer buf = new DataOutputBuffer(); + try { + super.write(buf); + WritableUtils.writeVInt(buf, getTokenType().getNumber()); + // Set s3 specific fields. 
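+    // S3 tokens carry the AWS access id, signature and string-to-sign;
+    // delegation tokens carry the OM certificate serial id and OM service id.
+    // fromUniqueSerializedKey below must read the fields back in this order.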
+ if (getTokenType().equals(S3AUTHINFO)) { + WritableUtils.writeString(buf, getAwsAccessId()); + WritableUtils.writeString(buf, getSignature()); + WritableUtils.writeString(buf, getStrToSign()); + } else { + WritableUtils.writeString(buf, getOmCertSerialId()); + WritableUtils.writeString(buf, getOmServiceId()); + } + } catch (java.io.IOException e) { + throw new IllegalArgumentException( + "Can't encode the the raw data ", e); + } + return buf.getData(); + } + + /** Instead of relying on proto deserialization, this + * provides explicit deserialization for OzoneTokenIdentifier. + * @return byte[] + */ + public OzoneTokenIdentifier fromUniqueSerializedKey(byte[] rawData) + throws IOException { + DataInputBuffer in = new DataInputBuffer(); + in.reset(rawData, rawData.length); + super.readFields(in); + int type = WritableUtils.readVInt(in); + // Set s3 specific fields. + if (type == S3AUTHINFO.getNumber()) { + this.tokenType = Type.S3AUTHINFO; + setAwsAccessId(WritableUtils.readString(in)); + setSignature(WritableUtils.readString(in)); + setStrToSign(WritableUtils.readString(in)); + } else { + this.tokenType = Type.DELEGATION_TOKEN; + setOmCertSerialId(WritableUtils.readString(in)); + setOmServiceId(WritableUtils.readString(in)); + } + return this; + } + /** * Overrides default implementation to write using Protobuf. * @@ -92,7 +145,6 @@ public void write(DataOutput out) throws IOException { .setRealUser(getRealUser().toString()) .setRenewer(getRenewer().toString()) .setIssueDate(getIssueDate()) - .setMaxDate(getMaxDate()) .setSequenceNumber(getSequenceNumber()) .setMasterKeyId(getMasterKeyId()); @@ -332,4 +384,4 @@ public String toString() { .append(", omServiceId=").append(getOmServiceId()); return buffer.toString(); } -} \ No newline at end of file +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/ExitManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/ExitManager.java new file mode 100644 index 000000000000..4a83c1d8c239 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/ExitManager.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.util; + +import org.apache.ratis.util.ExitUtils; +import org.slf4j.Logger; + +/** + * An Exit Manager used to shutdown service in case of unrecoverable error. + * This class will be helpful to test exit functionality. + */ +public class ExitManager { + + public void exitSystem(int status, String message, Throwable throwable, + Logger log) { + ExitUtils.terminate(1, message, throwable, log); + } +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java index ab9b4e1c080e..8c527cbd318a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java @@ -26,10 +26,16 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import static org.apache.hadoop.ozone.OmUtils.getOzoneManagerServiceId; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; + +import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -99,5 +105,48 @@ public void testGetOmHAAddressesById() { assertTrue(rpcAddrs.stream().anyMatch( a -> a.getAddress().getHostAddress().equals("1.1.1.3"))); } + + @Test + public void testGetOzoneManagerServiceId() throws IOException { + + // If the above is not configured, look at 'ozone.om.service.ids'. + // If no config is set, return null. (Non HA) + OzoneConfiguration configuration = new OzoneConfiguration(); + assertNull(getOzoneManagerServiceId(configuration)); + + // Verify 'ozone.om.internal.service.id' takes precedence + configuration.set(OZONE_OM_INTERNAL_SERVICE_ID, "om1"); + configuration.set(OZONE_OM_SERVICE_IDS_KEY, "om2,om1"); + String id = getOzoneManagerServiceId(configuration); + assertEquals("om1", id); + + configuration.set(OZONE_OM_SERVICE_IDS_KEY, "om2,om3"); + try { + getOzoneManagerServiceId(configuration); + Assert.fail(); + } catch (IOException ioEx) { + assertTrue(ioEx.getMessage() + .contains("Cannot find the internal service id om1 in [om2, om3]")); + } + + // When internal service ID is not defined. + // Verify if count(ozone.om.service.ids) == 1, return that id. 
+ configuration = new OzoneConfiguration(); + configuration.set(OZONE_OM_SERVICE_IDS_KEY, "om2"); + id = getOzoneManagerServiceId(configuration); + assertEquals("om2", id); + + // Verify if more than count(ozone.om.service.ids) > 1 and internal + // service id is not defined, throw exception + configuration.set(OZONE_OM_SERVICE_IDS_KEY, "om2,om1"); + try { + getOzoneManagerServiceId(configuration); + Assert.fail(); + } catch (IOException ioEx) { + assertTrue(ioEx.getMessage() + .contains("More than 1 OzoneManager ServiceID (ozone.om.service" + + ".ids) configured")); + } + } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java index 15468c7b2f62..650fc910289d 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java @@ -42,10 +42,21 @@ public void protobufConversion() { .setStorageType(StorageType.ARCHIVE) .build(); - OmBucketInfo afterSerialization = - OmBucketInfo.getFromProtobuf(bucket.getProtobuf()); + Assert.assertEquals(bucket, + OmBucketInfo.getFromProtobuf(bucket.getProtobuf())); + } + + @Test + public void protobufConversionOfBucketLink() { + OmBucketInfo bucket = OmBucketInfo.newBuilder() + .setBucketName("bucket") + .setVolumeName("vol1") + .setSourceVolume("otherVol") + .setSourceBucket("someBucket") + .build(); - Assert.assertEquals(bucket, afterSerialization); + Assert.assertEquals(bucket, + OmBucketInfo.getFromProtobuf(bucket.getProtobuf())); } @Test @@ -66,7 +77,10 @@ public void testClone() { /* Clone an omBucketInfo. */ OmBucketInfo cloneBucketInfo = omBucketInfo.copyObject(); - Assert.assertEquals(omBucketInfo, cloneBucketInfo); + Assert.assertNotSame(omBucketInfo, cloneBucketInfo); + Assert.assertEquals("Expected " + omBucketInfo + " and " + cloneBucketInfo + + " to be equal", + omBucketInfo, cloneBucketInfo); /* Reset acl & check not equal. */ omBucketInfo.setAcls(Collections.singletonList(new OzoneAcl( diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyLocationInfoGroup.java new file mode 100644 index 000000000000..0843e0b3e882 --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyLocationInfoGroup.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +/** + * Test OmKeyLocationInfoGroup. + */ +public class TestOmKeyLocationInfoGroup { + + @Test + public void testCreatingAndGetLatestVersionLocations() { + OmKeyLocationInfoGroup testInstance = createTestInstance(); + List latestList = + testInstance.getBlocksLatestVersionOnly(); + Assert.assertEquals(1, latestList.size()); + Assert.assertEquals(2, latestList.get(0).getCreateVersion()); + } + + @Test + public void testGettingPreviousVersions() { + OmKeyLocationInfoGroup testInstance = createTestInstance(); + List list = testInstance.getLocationList(1L); + Assert.assertEquals(2, list.size()); + } + + private OmKeyLocationInfoGroup createTestInstance() { + OmKeyLocationInfo info1 = new OmKeyLocationInfo.Builder().build(); + info1.setCreateVersion(1); + OmKeyLocationInfo info2 = new OmKeyLocationInfo.Builder().build(); + info2.setCreateVersion(1); + OmKeyLocationInfo info3 = new OmKeyLocationInfo.Builder().build(); + info3.setCreateVersion(2); + List locationInfoList = new ArrayList<>(); + locationInfoList.add(info1); + locationInfoList.add(info2); + locationInfoList.add(info3); + return new OmKeyLocationInfoGroup(2, locationInfoList); + } +} diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java similarity index 57% rename from hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java rename to hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java index 149273862a75..7471d539484c 100644 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,24 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.upgrade; -import java.util.concurrent.Callable; +package org.apache.hadoop.ozone.om.helpers; -import picocli.CommandLine.Command; +import org.junit.Assert; +import org.junit.Test; /** - * Command to move blocks between HDFS datanodes. + * Test OzoneFsUtils. 
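+ * Verifies OzoneFSUtils#isValidName: only absolute paths without ".", "..",
+ * ":" or empty ("//") components are accepted.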
*/ -@Command(name = "balance", - description = "Move the HDFS blocks for a better distribution " - + "usage.") -public class Balance implements Callable { +public class TestOzoneFsUtils { - @Override - public Void call() throws Exception { - System.err.println("[In-Place upgrade : balance] is not yet supported."); - return null; + @Test + public void testPaths() { + Assert.assertTrue(OzoneFSUtils.isValidName("/a/b")); + Assert.assertFalse(OzoneFSUtils.isValidName("../../../a/b")); + Assert.assertFalse(OzoneFSUtils.isValidName("/./.")); + Assert.assertFalse(OzoneFSUtils.isValidName("/:/")); + Assert.assertFalse(OzoneFSUtils.isValidName("a/b")); + Assert.assertFalse(OzoneFSUtils.isValidName("/a:/b")); + Assert.assertFalse(OzoneFSUtils.isValidName("/a//b")); } - -} \ No newline at end of file +} diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index 9fd212626e78..f678280d5888 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-csi - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone CSI service Apache Hadoop Ozone CSI service jar diff --git a/hadoop-ozone/csi/src/main/proto/proto.lock b/hadoop-ozone/csi/src/main/resources/proto.lock similarity index 100% rename from hadoop-ozone/csi/src/main/proto/proto.lock rename to hadoop-ozone/csi/src/main/resources/proto.lock diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 75eaa8ccadd9..1c8d81e52fcc 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-datanode Apache Hadoop Ozone Datanode jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh index d95c034939b9..99d8d5254504 100755 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ b/hadoop-ozone/dev-support/checks/acceptance.sh @@ -28,6 +28,8 @@ fi mkdir -p "$REPORT_DIR" +export OZONE_ACCEPTANCE_SUITE + cd "$DIST_DIR/compose" || exit 1 ./test-all.sh RES=$? diff --git a/hadoop-ozone/dev-support/checks/bats.sh b/hadoop-ozone/dev-support/checks/bats.sh new file mode 100755 index 000000000000..2e1bbadce9aa --- /dev/null +++ b/hadoop-ozone/dev-support/checks/bats.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "${DIR}/../../.." 
|| exit 1 + +REPORT_DIR=${OUTPUT_DIR:-"${DIR}/../../../target/bats"} +mkdir -p "${REPORT_DIR}" +REPORT_FILE="${REPORT_DIR}/summary.txt" + +rm -f "${REPORT_DIR}/output.log" + +find * -path '*/src/test/shell/*' -name '*.bats' -print0 \ + | xargs -0 -n1 bats --formatter tap \ + | tee -a "${REPORT_DIR}/output.log" + +grep '^\(not ok\|#\)' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" + +grep -c '^not ok' "${REPORT_FILE}" > "${REPORT_DIR}/failures" + +if [[ -s "${REPORT_FILE}" ]]; then + exit 1 +fi diff --git a/hadoop-ozone/dev-support/checks/kubernetes.sh b/hadoop-ozone/dev-support/checks/kubernetes.sh new file mode 100755 index 000000000000..7f68da1884ca --- /dev/null +++ b/hadoop-ozone/dev-support/checks/kubernetes.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/kubernetes"} + +OZONE_VERSION=$(grep "" "pom.xml" | sed 's/<[^>]*>//g'| sed 's/^[ \t]*//') +DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION" + +if [ ! -d "$DIST_DIR" ]; then + echo "Distribution dir is missing. Doing a full build" + "$DIR/build.sh" -Pcoverage +fi + +mkdir -p "$REPORT_DIR" + +cd "$DIST_DIR/kubernetes/examples" || exit 1 +./test-all.sh +RES=$? 
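+# keep the aggregated exit code so results are still copied before the check fails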
+cp -r result/* "$REPORT_DIR/" +cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" +exit $RES diff --git a/hadoop-ozone/dev-support/intellij/core-site.xml b/hadoop-ozone/dev-support/intellij/core-site.xml new file mode 100644 index 000000000000..862b32177187 --- /dev/null +++ b/hadoop-ozone/dev-support/intellij/core-site.xml @@ -0,0 +1,27 @@ + + + + + fs.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + + + fs.defaultFS + ofs://localhost/ + + diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index e1f5c7e55c5e..a3a38209d940 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -108,6 +108,7 @@ run cp "${ROOT}/hadoop-ozone/dist/src/shell/hdds/hadoop-config.cmd" "libexec/" run cp "${ROOT}/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh" "libexec/" run cp "${ROOT}/hadoop-ozone/dist/src/shell/ozone/ozone-config.sh" "libexec/" run cp -r "${ROOT}/hadoop-ozone/dist/src/shell/shellprofile.d" "libexec/" +run cp -r "${ROOT}/hadoop-ozone/dist/src/shell/upgrade" "libexec/" run cp "${ROOT}/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh" "sbin/" @@ -121,6 +122,9 @@ run cp -r "${ROOT}/hadoop-ozone/fault-injection-test/network-tests/src/test/blo # Optional documentation, could be missing cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./ +#copy byteman helpers +run cp -r "${ROOT}/dev-support/byteman" "share/ozone/" + #Copy docker compose files #compose files are preprocessed: properties (eg. project.version) are replaced first by maven. run cp -p -R "${ROOT}/hadoop-ozone/dist/target/compose" . @@ -130,3 +134,4 @@ run cp -p -r "${ROOT}/hadoop-ozone/dist/target/Dockerfile" . #workaround for https://issues.apache.org/jira/browse/MRESOURCES-236 find ./compose -name "*.sh" -exec chmod 755 {} \; +find ./kubernetes -name "*.sh" -exec chmod 755 {} \; diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 840f628899b2..a9d877170bff 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -19,16 +19,16 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-dist Apache Hadoop Ozone Distribution jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT UTF-8 true - 20200420-1 + 20200625-1 @@ -50,8 +50,7 @@ *.classpath hadoop-hdds-server-scm,hadoop-ozone-common,hadoop-ozone-csi,hadoop-ozone-datanode,hadoop-ozone-insight, - hadoop-ozone-ozone-manager,hadoop-ozone-recon,hadoop-ozone-s3gateway,hadoop-ozone-tools, - hadoop-ozone-upgrade + hadoop-ozone-ozone-manager,hadoop-ozone-recon,hadoop-ozone-s3gateway,hadoop-ozone-tools @@ -219,10 +218,6 @@ org.apache.hadoop hadoop-hdds-docs - - org.apache.hadoop - hadoop-ozone-upgrade - org.apache.hadoop hadoop-ozone-insight diff --git a/hadoop-ozone/dist/src/main/compose/failing1/.env b/hadoop-ozone/dist/src/main/compose/failing1/.env new file mode 120000 index 000000000000..c9b103fa52e1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/failing1/.env @@ -0,0 +1 @@ +../ozone/.env \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/failing1/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/failing1/docker-compose.yaml new file mode 120000 index 000000000000..76acad508c42 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/failing1/docker-compose.yaml @@ -0,0 +1 @@ +../ozone/docker-compose.yaml \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/failing1/docker-config 
b/hadoop-ozone/dist/src/main/compose/failing1/docker-config new file mode 120000 index 000000000000..49694527f1bd --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/failing1/docker-config @@ -0,0 +1 @@ +../ozone/docker-config \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/failing1/test.sh b/hadoop-ozone/dist/src/main/compose/failing1/test.sh new file mode 100755 index 000000000000..cb8687f6a4b6 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/failing1/test.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:failing + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +export SECURITY_ENABLED=false +export OZONE_REPLICATION_FACTOR=3 + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env + +execute_robot_test scm failing/test1.robot +execute_robot_test scm failing/test2.robot + +stop_docker_env + +generate_report diff --git a/hadoop-ozone/dist/src/main/compose/failing2/.env b/hadoop-ozone/dist/src/main/compose/failing2/.env new file mode 120000 index 000000000000..c9b103fa52e1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/failing2/.env @@ -0,0 +1 @@ +../ozone/.env \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/failing2/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/failing2/docker-compose.yaml new file mode 120000 index 000000000000..76acad508c42 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/failing2/docker-compose.yaml @@ -0,0 +1 @@ +../ozone/docker-compose.yaml \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/failing2/docker-config b/hadoop-ozone/dist/src/main/compose/failing2/docker-config new file mode 120000 index 000000000000..49694527f1bd --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/failing2/docker-config @@ -0,0 +1 @@ +../ozone/docker-config \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/failing2/test.sh b/hadoop-ozone/dist/src/main/compose/failing2/test.sh new file mode 100755 index 000000000000..cb8687f6a4b6 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/failing2/test.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:failing + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +export SECURITY_ENABLED=false +export OZONE_REPLICATION_FACTOR=3 + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env + +execute_robot_test scm failing/test1.robot +execute_robot_test scm failing/test2.robot + +stop_docker_env + +generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env new file mode 100644 index 000000000000..8446b4a5b015 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HDDS_VERSION=${hdds.version} +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} +HADOOP_OPTS= \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml new file mode 100644 index 000000000000..b9f4b60702c6 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml @@ -0,0 +1,93 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
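+# HA cluster layout: three OMs (om1-om3) plus SCM, datanode, S3 gateway and Recon;
+# shared settings come from the x-common-config / x-replication fragments below.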
+ +version: "3.4" + +# reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) +x-common-config: + &common-config + image: apache/ozone-runner:${OZONE_RUNNER_VERSION} + volumes: + - ../..:/opt/hadoop + env_file: + - docker-config + +x-replication: + &replication + OZONE-SITE.XML_ozone.replication: ${OZONE_REPLICATION_FACTOR:-1} + +services: + datanode: + <<: *common-config + ports: + - 9864 + - 9882 + environment: + <<: *replication + command: ["ozone","datanode"] + om1: + <<: *common-config + environment: + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + <<: *replication + ports: + - 9874 + - 9862 + hostname: om1 + command: ["ozone","om"] + om2: + <<: *common-config + environment: + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + <<: *replication + ports: + - 9874 + - 9862 + hostname: om2 + command: ["ozone","om"] + om3: + <<: *common-config + environment: + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + <<: *replication + ports: + - 9874 + - 9862 + hostname: om3 + command: ["ozone","om"] + scm: + <<: *common-config + ports: + - 9876:9876 + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} + <<: *replication + command: ["ozone","scm"] + s3g: + <<: *common-config + environment: + <<: *replication + ports: + - 9878:9878 + command: ["ozone","s3g"] + recon: + <<: *common-config + ports: + - 9888:9888 + environment: + <<: *replication + command: ["ozone","recon"] diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config new file mode 100644 index 000000000000..d378a679f816 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
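+# Client side config: fs.defaultFS resolves through the omservice HA service id
+# that maps to om1, om2 and om3 below.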
+ +CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.volume1.omservice + +OZONE-SITE.XML_ozone.om.service.ids=omservice +OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 +OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 +OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 +OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 +OZONE-SITE.XML_ozone.om.ratis.enable=true + +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.scm.container.size=1GB +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_ozone.client.failover.max.attempts=6 +OZONE-SITE.XML_hdds.datanode.dir=/data/hdds + +no_proxy=om1,om2,om3,scm,s3g,recon,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh new file mode 100755 index 000000000000..a14aa9cfd8ce --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +export SECURITY_ENABLED=false +export OZONE_REPLICATION_FACTOR=3 + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env + +execute_robot_test scm basic/ozone-shell-single.robot + +stop_docker_env + +generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config index 8180a10f878c..5c1348422f43 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config @@ -15,6 +15,7 @@ # limitations under the License. CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.xml_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop2-@project.version@.jar no_proxy=om,scm,s3g,recon,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config index af8e7f800405..e80f0284a696 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config @@ -15,6 +15,7 @@ # limitations under the License. 
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.xml_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar no_proxy=om,scm,s3g,recon,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config index af8e7f800405..e80f0284a696 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config @@ -15,6 +15,7 @@ # limitations under the License. CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.xml_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-hadoop3-@project.version@.jar no_proxy=om,scm,s3g,recon,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh new file mode 100644 index 000000000000..3a18d4df2860 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) +ALL_RESULT_DIR="$SCRIPT_DIR/result" +mkdir -p "$ALL_RESULT_DIR" +rm "$ALL_RESULT_DIR/*" || true +source "$SCRIPT_DIR/../testlib.sh" + +tests=$(find_tests) +cd "$SCRIPT_DIR" + +RESULT=0 +# shellcheck disable=SC2044 +for t in ${tests}; do + d="$(dirname "${t}")" + + if ! run_test_script "${d}"; then + RESULT=1 + fi + + copy_results "${d}" "${ALL_RESULT_DIR}" +done + diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha-s3/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha-s3/docker-config index b835147ee261..4d47bf0b6f19 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha-s3/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha-s3/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem CORE-SITE.XML_fs.defaultFS=o3fs://bucket.volume.id1 OZONE-SITE.XML_ozone.om.service.ids=id1 OZONE-SITE.XML_ozone.om.nodes.id1=om1,om2,om3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config index 4eb1c8a5b6b7..008c684aeb99 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.volume1.omservice OZONE-SITE.XML_ozone.om.service.ids=omservice OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh index d4efa4f4af00..392112ba5313 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh @@ -39,7 +39,7 @@ execute_robot_test scm topology/loaddata.robot stop_containers datanode_1 datanode_2 datanode_3 -execute_robot_test scm topology/readdata.robot +execute_robot_test scm -N readdata-first-half topology/readdata.robot start_containers datanode_1 datanode_2 datanode_3 @@ -49,7 +49,7 @@ wait_for_port datanode_3 9858 60 stop_containers datanode_4 datanode_5 datanode_6 -execute_robot_test scm topology/readdata.robot +execute_robot_test scm -N readdata-second-half topology/readdata.robot stop_docker_env diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config index 4d53acbad793..f100e823517a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.container.size=1GB diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index e0b1d62ade08..2f57831f88bf 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +#suite:unsecure + COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export COMPOSE_DIR @@ -26,24 +28,24 @@ source "$COMPOSE_DIR/../testlib.sh" start_docker_env -#Due to the limitation of the current auditparser test, it should be the -#first test in a clean cluster. - -#Disabling for now, audit parser tool during parse getting exception. 
-#execute_robot_test om auditparser - execute_robot_test scm lib +execute_robot_test scm ozone-lib execute_robot_test scm basic execute_robot_test scm gdpr -execute_robot_test scm -v SCHEME:ofs ozonefs/ozonefs.robot -execute_robot_test scm -v SCHEME:o3fs ozonefs/ozonefs.robot +for scheme in ofs o3fs; do + for bucket in link bucket; do + execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N ozonefs-${scheme}-${bucket} ozonefs/ozonefs.robot + done +done execute_robot_test scm security/ozone-secure-token.robot -execute_robot_test scm s3 +for bucket in link generated; do + execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket} s3 +done execute_robot_test scm recon diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index 0adafdfd60ea..7f1050db8b90 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -67,6 +67,7 @@ HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs +CORE-SITE.XML_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.volume1/ MAPRED-SITE.XML_mapreduce.framework.name=yarn diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config index e245b7e01998..c332448a9508 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-om-ha/docker-config @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem CORE-SITE.XML_fs.defaultFS=o3fs://bucket.volume.id1 OZONE-SITE.XML_ozone.om.service.ids=id1 OZONE-SITE.XML_ozone.om.internal.service.id=id1 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index abb30453e9c7..30e1816ab8c1 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem OZONE-SITE.XML_ozone.om.volume.listall.allowed=false OZONE-SITE.XML_ozone.om.address=om diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh index 9c3f3ab83cc7..eeccb849eedc 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+#suite:secure + COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export COMPOSE_DIR @@ -23,18 +25,27 @@ source "$COMPOSE_DIR/../testlib.sh" export SECURITY_ENABLED=true +: ${OZONE_BUCKET_KEY_NAME:=key1} + start_docker_env +execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} + execute_robot_test scm kinit.robot execute_robot_test scm basic execute_robot_test scm security -execute_robot_test scm -v SCHEME:ofs ozonefs/ozonefs.robot -execute_robot_test scm -v SCHEME:o3fs ozonefs/ozonefs.robot +for scheme in ofs o3fs; do + for bucket in link bucket; do + execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N ozonefs-${scheme}-${bucket} ozonefs/ozonefs.robot + done +done -execute_robot_test s3g s3 +for bucket in link generated; do + execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} s3 +done #expects 4 pipelines, should be run before #admincli which creates STANDALONE pipeline diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index e7f6f7123f09..45a3c52d52f1 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -25,31 +25,31 @@ PROJECT_DIR="$SCRIPT_DIR/.." mkdir -p "$ALL_RESULT_DIR" rm "$ALL_RESULT_DIR/*" || true +source "$SCRIPT_DIR"/testlib.sh + if [ "$OZONE_WITH_COVERAGE" ]; then java -cp "$PROJECT_DIR"/share/coverage/$(ls "$PROJECT_DIR"/share/coverage | grep test-util):"$PROJECT_DIR"/share/coverage/jacoco-core.jar org.apache.hadoop.test.JacocoServer & DOCKER_BRIDGE_IP=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}') export HADOOP_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*" fi +tests=$(find_tests) +cd "$SCRIPT_DIR" + RESULT=0 -IFS=$'\n' # shellcheck disable=SC2044 -for test in $(find "$SCRIPT_DIR" -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" |sort); do - echo "Executing test in $(dirname "$test")" - - #required to read the .env file from the right location - cd "$(dirname "$test")" || continue - ./test.sh - ret=$? - if [[ $ret -ne 0 ]]; then - RESULT=1 - echo "ERROR: Test execution of $(dirname "$test") is FAILED!!!!" +for t in ${tests}; do + d="$(dirname "${t}")" + + if ! 
run_test_script "${d}"; then + RESULT=1 fi - RESULT_DIR="$(dirname "$test")/result" - cp "$RESULT_DIR"/robot-*.xml "$RESULT_DIR"/docker-*.log "$RESULT_DIR"/*.out* "$ALL_RESULT_DIR"/ + + copy_results "${d}" "${ALL_RESULT_DIR}" done -rebot -N "smoketests" -d "$SCRIPT_DIR/result" "$SCRIPT_DIR/result/robot-*.xml" +rebot --nostatusrc -N acceptance -d "$ALL_RESULT_DIR" "$ALL_RESULT_DIR"/*.xml + if [ "$OZONE_WITH_COVERAGE" ]; then pkill -f JacocoServer cp /tmp/jacoco-combined.exec "$SCRIPT_DIR"/result diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 15d1664ed80c..db449b90ad9c 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -17,7 +17,6 @@ set -e COMPOSE_ENV_NAME=$(basename "$COMPOSE_DIR") -COMPOSE_FILE=$COMPOSE_DIR/docker-compose.yaml RESULT_DIR=${RESULT_DIR:-"$COMPOSE_DIR/result"} RESULT_DIR_INSIDE="/tmp/smoketest/$(basename "$COMPOSE_ENV_NAME")/result" SMOKETEST_DIR_INSIDE="${OZONE_DIR:-/opt/hadoop}/smoketest" @@ -32,17 +31,39 @@ fi ## @description create results directory, purging any prior data create_results_dir() { #delete previous results - rm -rf "$RESULT_DIR" + [[ "${OZONE_KEEP_RESULTS:-}" == "true" ]] || rm -rf "$RESULT_DIR" mkdir -p "$RESULT_DIR" #Should be writeable from the docker containers where user is different. chmod ogu+w "$RESULT_DIR" } +## @description find all the test.sh scripts in the immediate child dirs +find_tests(){ + if [[ -n "${OZONE_ACCEPTANCE_SUITE}" ]]; then + tests=$(find . -mindepth 2 -maxdepth 2 -name test.sh | xargs grep -l "^#suite:${OZONE_ACCEPTANCE_SUITE}$" | sort) + + # 'misc' is default suite, add untagged tests, too + if [[ "misc" == "${OZONE_ACCEPTANCE_SUITE}" ]]; then + untagged="$(find . -mindepth 2 -maxdepth 2 -name test.sh | xargs grep -L "^#suite:")" + if [[ -n "${untagged}" ]]; then + tests=$(echo ${tests} ${untagged} | xargs -n1 | sort) + fi + fi + + if [[ -z "${tests}" ]]; then + echo "No tests found for suite ${OZONE_ACCEPTANCE_SUITE}" + exit 1 + fi + else + tests=$(find . -mindepth 2 -maxdepth 2 -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" | sort) + fi + echo $tests +} ## @description wait until safemode exit (or 180 seconds) -## @param the docker-compose file wait_for_safemode_exit(){ - local compose_file=$1 + # version-dependent + : ${OZONE_SAFEMODE_STATUS_COMMAND:=ozone admin safemode status --verbose} #Reset the timer SECONDS=0 @@ -51,11 +72,11 @@ wait_for_safemode_exit(){ while [[ $SECONDS -lt 180 ]]; do #This line checks the safemode status in scm - local command="ozone admin safemode status" + local command="${OZONE_SAFEMODE_STATUS_COMMAND}" if [[ "${SECURITY_ENABLED}" == 'true' ]]; then - status=$(docker-compose -f "${compose_file}" exec -T scm bash -c "kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && $command" || true) + status=$(docker-compose exec -T scm bash -c "kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && $command" || true) else - status=$(docker-compose -f "${compose_file}" exec -T scm bash -c "$command") + status=$(docker-compose exec -T scm bash -c "$command") fi echo $status @@ -80,9 +101,9 @@ start_docker_env(){ create_results_dir export OZONE_SAFEMODE_MIN_DATANODES="${datanode_count}" - docker-compose -f "$COMPOSE_FILE" --no-ansi down - if ! { docker-compose -f "$COMPOSE_FILE" --no-ansi up -d --scale datanode="${datanode_count}" \ - && wait_for_safemode_exit "$COMPOSE_FILE"; }; then + docker-compose --no-ansi down + if ! 
{ docker-compose --no-ansi up -d --scale datanode="${datanode_count}" \ + && wait_for_safemode_exit ; }; then OUTPUT_NAME="$COMPOSE_ENV_NAME" stop_docker_env return 1 @@ -114,11 +135,11 @@ execute_robot_test(){ OUTPUT_PATH="$RESULT_DIR_INSIDE/${OUTPUT_FILE}" # shellcheck disable=SC2068 - docker-compose -f "$COMPOSE_FILE" exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" \ - && docker-compose -f "$COMPOSE_FILE" exec -T "$CONTAINER" robot -v OM_SERVICE_ID:"${OM_SERVICE_ID}" -v SECURITY_ENABLED:"${SECURITY_ENABLED}" -v OM_HA_PARAM:"${OM_HA_PARAM}" ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" + docker-compose exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" \ + && docker-compose exec -T "$CONTAINER" robot -v OM_SERVICE_ID:"${OM_SERVICE_ID}" -v SECURITY_ENABLED:"${SECURITY_ENABLED}" -v OM_HA_PARAM:"${OM_HA_PARAM}" -v KEY_NAME:"${OZONE_BUCKET_KEY_NAME}" ${ARGUMENTS[@]} --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" local -i rc=$? - FULL_CONTAINER_NAME=$(docker-compose -f "$COMPOSE_FILE" ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}') + FULL_CONTAINER_NAME=$(docker-compose ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}') docker cp "$FULL_CONTAINER_NAME:$OUTPUT_PATH" "$RESULT_DIR/" copy_daemon_logs @@ -135,7 +156,7 @@ execute_robot_test(){ ## @description Copy any 'out' files for daemon processes to the result dir copy_daemon_logs() { local c f - for c in $(docker-compose -f "$COMPOSE_FILE" ps | grep "^${COMPOSE_ENV_NAME}_" | awk '{print $1}'); do + for c in $(docker-compose ps | grep "^${COMPOSE_ENV_NAME}_" | awk '{print $1}'); do for f in $(docker exec "${c}" ls -1 /var/log/hadoop | grep -F '.out'); do docker cp "${c}:/var/log/hadoop/${f}" "$RESULT_DIR/" done @@ -149,7 +170,7 @@ copy_daemon_logs() { execute_command_in_container(){ set -e # shellcheck disable=SC2068 - docker-compose -f "$COMPOSE_FILE" exec -T "$@" + docker-compose exec -T "$@" set +e } @@ -157,7 +178,7 @@ execute_command_in_container(){ ## @param List of container names, eg datanode_1 datanode_2 stop_containers() { set -e - docker-compose -f "$COMPOSE_FILE" --no-ansi stop $@ + docker-compose --no-ansi stop $@ set +e } @@ -166,7 +187,7 @@ stop_containers() { ## @param List of container names, eg datanode_1 datanode_2 start_containers() { set -e - docker-compose -f "$COMPOSE_FILE" --no-ansi start $@ + docker-compose --no-ansi start $@ set +e } @@ -185,7 +206,7 @@ wait_for_port(){ while [[ $SECONDS -lt $timeout ]]; do set +e - docker-compose -f "${COMPOSE_FILE}" exec -T scm /bin/bash -c "nc -z $host $port" + docker-compose exec -T scm /bin/bash -c "nc -z $host $port" status=$? set -e if [ $status -eq 0 ] ; then @@ -202,9 +223,9 @@ wait_for_port(){ ## @description Stops a docker-compose based test environment (with saving the logs) stop_docker_env(){ - docker-compose -f "$COMPOSE_FILE" --no-ansi logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log" + docker-compose --no-ansi logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log" if [ "${KEEP_RUNNING:-false}" = false ]; then - docker-compose -f "$COMPOSE_FILE" --no-ansi down + docker-compose --no-ansi down fi } @@ -226,3 +247,39 @@ generate_report(){ exit 1 fi } + +## @description Copy results of a single test environment to the "all tests" dir. 
+copy_results() { + local test_dir="$1" + local all_result_dir="$2" + + local result_dir="${test_dir}/result" + local test_dir_name=$(basename ${test_dir}) + if [[ -n "$(find "${result_dir}" -name "*.xml")" ]]; then + rebot --nostatusrc -N "${test_dir_name}" -o "${all_result_dir}/${test_dir_name}.xml" "${result_dir}/*.xml" + fi + + cp "${result_dir}"/docker-*.log "${all_result_dir}"/ + if [[ -n "$(find "${result_dir}" -name "*.out")" ]]; then + cp "${result_dir}"/*.out* "${all_result_dir}"/ + fi +} + +run_test_script() { + local d="$1" + + echo "Executing test in ${d}" + + #required to read the .env file from the right location + cd "${d}" || return + + ret=0 + if ! ./test.sh; then + ret=1 + echo "ERROR: Test execution of ${d} is FAILED!!!!" + fi + + cd - > /dev/null + + return ${ret} +} diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/.env b/hadoop-ozone/dist/src/main/compose/upgrade/.env new file mode 100644 index 000000000000..6f757c552b2e --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/.env @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +HDDS_VERSION=${hdds.version} +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} +OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} +OZONE_DIR=/opt/hadoop +OZONE_VOLUME=. diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/README.md new file mode 100644 index 000000000000..5d844e006b50 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/README.md @@ -0,0 +1,29 @@ + + +# Compose file for upgrade + +This directory contains a sample cluster definition and script for +testing an upgrade from a previous version to the current one. + +Data for each container is persisted in a mounted volume (by default it's +`data` under the `compose/upgrade` directory, but it can be overridden via +the `OZONE_VOLUME` environment variable). + +The prior version is run using an official `apache/ozone` image, while the +current version is run with the `ozone-runner` image using locally built +source code. + +Currently, the test script only supports a single version upgrade (e.g. +from 0.5.0 to 1.0.0). diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/docker-compose.yaml new file mode 100644 index 000000000000..ae527593dcd4 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/docker-compose.yaml @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership.
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "3.4" + +# reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) +x-common-config: + &common-config + env_file: + - docker-config + image: ${OZONE_IMAGE} + +x-replication: + &replication + OZONE-SITE.XML_ozone.replication: ${OZONE_REPLICATION_FACTOR:-1} + +x-datanode: + &datanode + command: ["ozone","datanode"] + <<: *common-config + environment: + <<: *replication + ports: + - 9864 + - 9882 + +services: + dn1: + <<: *datanode + networks: + net: + ipv4_address: 10.9.0.11 + volumes: + - ${OZONE_VOLUME}/dn1:/data + - ../..:${OZONE_DIR} + dn2: + <<: *datanode + networks: + net: + ipv4_address: 10.9.0.12 + volumes: + - ${OZONE_VOLUME}/dn2:/data + - ../..:${OZONE_DIR} + dn3: + <<: *datanode + networks: + net: + ipv4_address: 10.9.0.13 + volumes: + - ${OZONE_VOLUME}/dn3:/data + - ../..:${OZONE_DIR} + om: + command: ["ozone","om"] + <<: *common-config + environment: + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + <<: *replication + networks: + net: + ipv4_address: 10.9.0.14 + ports: + - 9874:9874 + - 9862:9862 + volumes: + - ${OZONE_VOLUME}/om:/data + - ../..:${OZONE_DIR} + recon: + command: ["ozone","recon"] + <<: *common-config + environment: + <<: *replication + networks: + net: + ipv4_address: 10.9.0.15 + ports: + - 9888:9888 + volumes: + - ${OZONE_VOLUME}/recon:/data + - ../..:${OZONE_DIR} + s3g: + command: ["ozone","s3g"] + <<: *common-config + environment: + <<: *replication + networks: + net: + ipv4_address: 10.9.0.16 + ports: + - 9878:9878 + volumes: + - ${OZONE_VOLUME}/s3g:/data + - ../..:${OZONE_DIR} + scm: + command: ["ozone","scm"] + <<: *common-config + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} + <<: *replication + networks: + net: + ipv4_address: 10.9.0.17 + ports: + - 9876:9876 + volumes: + - ${OZONE_VOLUME}/scm:/data + - ../..:${OZONE_DIR} + +networks: + net: + driver: bridge + ipam: + config: + - subnet: 10.9.0.0/16 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/docker-config new file mode 100644 index 000000000000..ecc994b4ac26 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/docker-config @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem +CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem +OZONE-SITE.XML_ozone.om.address=om +OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.container.size=1GB +OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 +OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.block.client.address=scm +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon +OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +OZONE-SITE.XML_ozone.recon.address=recon:9891 +OZONE-SITE.XML_recon.om.snapshot.task.interval.delay=1m + +no_proxy=om,scm,s3g,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh new file mode 100644 index 000000000000..0c51325b7c2d --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
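The `docker-config` entries above follow the runner image's convention for injecting configuration: the part before the first underscore names the target file (e.g. `OZONE-SITE.XML` or `CORE-SITE.XML`), and the remainder is the property name and value, which the container turns into the corresponding XML configuration file at startup. A minimal sketch of that naming convention, shown for illustration only (the real conversion happens inside the image, not in this snippet):

```bash
#!/usr/bin/env bash
# Illustration of the FILE_property.name=value convention used in docker-config.
line='OZONE-SITE.XML_ozone.scm.names=scm'

file="${line%%_*}"   # OZONE-SITE.XML  -> ends up in ozone-site.xml
rest="${line#*_}"    # ozone.scm.names=scm
name="${rest%%=*}"   # ozone.scm.names
value="${rest#*=}"   # scm

printf 'file: %s\nname: %s\nvalue: %s\n' "$file" "$name" "$value"
```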
+ +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +: "${OZONE_REPLICATION_FACTOR:=3}" +: "${OZONE_UPGRADE_FROM:="0.5.0"}" +: "${OZONE_UPGRADE_TO:="1.0.0"}" +: "${OZONE_VOLUME:="${COMPOSE_DIR}/data"}" + +export OZONE_VOLUME + +mkdir -p "${OZONE_VOLUME}"/{dn1,dn2,dn3,om,recon,s3g,scm} +mkdir -p "${OZONE_VOLUME}/debug" + +if [[ -n "${OZONE_VOLUME_OWNER}" ]]; then + current_user=$(whoami) + if [[ "${OZONE_VOLUME_OWNER}" != "${current_user}" ]]; then + chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}" \ + || sudo chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}" + fi +fi + +# define version-specifics +export OZONE_DIR=/opt/ozone +export OZONE_IMAGE="apache/ozone:${OZONE_UPGRADE_FROM}" +# shellcheck source=/dev/null +source "${COMPOSE_DIR}/versions/ozone-${OZONE_UPGRADE_FROM}.sh" +# shellcheck source=/dev/null +source "${COMPOSE_DIR}/../testlib.sh" + +# prepare pre-upgrade cluster +start_docker_env +execute_robot_test scm topology/loaddata.robot +stop_docker_env + +# run upgrade scripts +SCRIPT_DIR=../../libexec/upgrade +[[ -f "${SCRIPT_DIR}/${OZONE_UPGRADE_TO}.sh" ]] && "${SCRIPT_DIR}/${OZONE_UPGRADE_TO}.sh" + +# update version-specifics +export OZONE_DIR=/opt/hadoop +unset OZONE_IMAGE # use apache/ozone-runner defined in docker-compose.yaml +# shellcheck source=/dev/null +source "${COMPOSE_DIR}/versions/ozone-${OZONE_UPGRADE_TO}.sh" +# shellcheck source=/dev/null +source "${COMPOSE_DIR}/../testlib.sh" + +# re-start cluster with new version and check after upgrade +export OZONE_KEEP_RESULTS=true +start_docker_env +execute_robot_test scm topology/readdata.robot +stop_docker_env + +generate_report diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md new file mode 100644 index 000000000000..24cd113469a6 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md @@ -0,0 +1,15 @@ + + +The scripts in this directory define version-specific behavior required for [`testlib.sh`](../../testlib.sh). For example the `ozone admin` command was renamed from `ozone scmcli` in 1.0.0. diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh new file mode 100644 index 000000000000..667ce959026a --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
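The upgrade test script above is driven entirely by environment variables whose defaults are declared at its top, so a different source version, target version or data directory can be selected without editing the script. A usage sketch (the values shown are the defaults):

```bash
# Run the upgrade test from the compose/upgrade directory. All variables are
# optional; these are the defaults from test.sh.
cd hadoop-ozone/dist/src/main/compose/upgrade

OZONE_UPGRADE_FROM=0.5.0 \
OZONE_UPGRADE_TO=1.0.0 \
OZONE_VOLUME="$(pwd)/data" \
  ./test.sh
```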
+ +export OZONE_ADMIN_COMMAND=scmcli +export OZONE_SAFEMODE_STATUS_COMMAND='ozone scmcli safemode status' diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh new file mode 100644 index 000000000000..3ff23e0441bc --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export OZONE_ADMIN_COMMAND=admin +export OZONE_SAFEMODE_STATUS_COMMAND='ozone admin safemode status --verbose' diff --git a/hadoop-ozone/dist/src/main/k8s/.gitignore b/hadoop-ozone/dist/src/main/k8s/.gitignore new file mode 100644 index 000000000000..bb9ee6087c6f --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/.gitignore @@ -0,0 +1,15 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
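The per-version scripts above export only the pieces of the CLI that changed between releases; `wait_for_safemode_exit` in `compose/testlib.sh` falls back to the 1.0.0 syntax when no override is provided. A hypothetical script for a later release, sketched here purely to show the pattern (it is not part of the patch), would look the same:

```bash
#!/usr/bin/env bash
# Hypothetical versions/ozone-1.1.0.sh, sketched only to show the pattern.
# OZONE_SAFEMODE_STATUS_COMMAND is read by wait_for_safemode_exit in
# compose/testlib.sh; without an override it defaults to the 1.0.0 syntax.
export OZONE_ADMIN_COMMAND=admin
export OZONE_SAFEMODE_STATUS_COMMAND='ozone admin safemode status --verbose'
```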
+result diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml index 6e638915a247..124f72ff5e58 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: "om-0.om" OZONE-SITE.XML_ozone.scm.client.address: "scm-0.scm" OZONE-SITE.XML_ozone.scm.names: "scm-0.scm" + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: "INFO, stdout" LOG4J.PROPERTIES_log4j.appender.stdout: "org.apache.log4j.ConsoleAppender" LOG4J.PROPERTIES_log4j.appender.stdout.layout: "org.apache.log4j.PatternLayout" diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/onenode.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/onenode.yaml index 882477936adf..19a3e1dd7409 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/onenode.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/onenode.yaml @@ -19,7 +19,7 @@ description: remove scheduling rules to make it possible to run multiple datanod - type: Remove trigger: metadata: - name: ozone-datanode + name: datanode path: - spec - template diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/flekszible.yaml similarity index 100% rename from hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml rename to hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/flekszible.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-deployment.yaml similarity index 100% rename from hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml rename to hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-deployment.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-service.yaml similarity index 100% rename from hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml rename to hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-service.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-volume.yaml similarity index 100% rename from hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml rename to hadoop-ozone/dist/src/main/k8s/definitions/test-webserver/webserver-volume.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml index 00fb72b14e50..f02fb56f089c 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff 
--git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml index c393eada79d6..db91864bdaf3 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml @@ -37,16 +37,6 @@ spec: prometheus.io/port: "9882" prometheus.io/path: /prom spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname securityContext: fsGroup: 1000 containers: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh new file mode 100755 index 000000000000..7d6bdfb981e0 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/test.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export K8S_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +cd "$K8S_DIR" + +# shellcheck source=/dev/null +source "../testlib.sh" + +rm -rf result + +regenerate_resources + +start_k8s_env + +execute_robot_test scm-0 smoketest/basic/basic.robot + +combine_reports + +get_logs + +stop_k8s_env + +revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml index 00fb72b14e50..f02fb56f089c 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh new file mode 100755 index 000000000000..7d6bdfb981e0 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/test.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export K8S_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +cd "$K8S_DIR" + +# shellcheck source=/dev/null +source "../testlib.sh" + +rm -rf result + +regenerate_resources + +start_k8s_env + +execute_robot_test scm-0 smoketest/basic/basic.robot + +combine_reports + +get_logs + +stop_k8s_env + +revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible index 350ea73c50b7..3d9bfcd6ce2d 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible @@ -37,11 +37,11 @@ import: - type: Image image: "@docker.image@" - type: ozone/tracing - - path: pv-test + - path: test-webserver destination: pv-test - path: ozone-csi destination: csi - - path: pv-test + - path: test-webserver destination: pv-test transformations: - type: Namespace diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml index 82f303fafe4e..58076303fbe3 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml index 475ce690b643..b22212ff79ea 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml @@ -37,16 +37,6 @@ spec: prometheus.io/port: "9882" prometheus.io/path: /prom spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname securityContext: fsGroup: 1000 containers: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh new file mode 100755 index 000000000000..7d6bdfb981e0 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/test.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export K8S_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +cd "$K8S_DIR" + +# shellcheck source=/dev/null +source "../testlib.sh" + +rm -rf result + +regenerate_resources + +start_k8s_env + +execute_robot_test scm-0 smoketest/basic/basic.robot + +combine_reports + +get_logs + +stop_k8s_env + +revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible index 2fb527c0a45f..ec6d74533baa 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible @@ -31,7 +31,7 @@ import: destination: pv-test - path: ozone-csi destination: csi - - path: pv-test + - path: test-webserver destination: pv-test transformations: - type: Namespace diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml index c7e8f486e89f..820c1977b4eb 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml @@ -26,6 +26,7 @@ data: OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh b/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh new file mode 100755 index 000000000000..7d6bdfb981e0 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/test.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
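Each example directory now ships the same thin `test.sh` wrapper around the k8s `testlib.sh`: regenerate the resources for the local source tree, deploy, run the basic robot suite inside the `scm-0` pod, collect reports and logs, then tear the cluster down. A usage sketch for running one example by hand, assuming a local Kubernetes cluster such as minikube with `kubectl`, `flekszible` and `rebot` available:

```bash
# Run a single k8s example test against the cluster kubectl currently points at.
cd hadoop-ozone/dist/src/main/k8s/examples/minikube
./test.sh

# Keep the pods around for debugging; stop_k8s_env skips the
# `kubectl delete -f .` step when KEEP_RUNNING is set to a non-empty value.
KEEP_RUNNING=true ./test.sh
```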
+ +export K8S_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +cd "$K8S_DIR" + +# shellcheck source=/dev/null +source "../testlib.sh" + +rm -rf result + +regenerate_resources + +start_k8s_env + +execute_robot_test scm-0 smoketest/basic/basic.robot + +combine_reports + +get_logs + +stop_k8s_env + +revert_resources diff --git a/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh new file mode 100755 index 000000000000..ae810c9f679c --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# Test executor to test all the compose/*/test.sh test scripts. +# +SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) + +set -ex + +ALL_RESULT_DIR="$SCRIPT_DIR/result" +rm "$ALL_RESULT_DIR/*" || true +mkdir -p "$ALL_RESULT_DIR" + +RESULT=0 +IFS=$'\n' +# shellcheck disable=SC2044 +for test in $(find "$SCRIPT_DIR" -name test.sh | grep "${OZONE_TEST_SELECTOR:-""}" |sort); do + TEST_DIR="$(dirname $test)" + TEST_NAME="$(basename "$TEST_DIR")" + + echo "" + echo "#### Executing tests of ${TEST_DIR} #####" + echo "" + cd "$TEST_DIR" || continue + ./test.sh + + cp "$TEST_DIR"/result/output.xml "$ALL_RESULT_DIR"/"${TEST_NAME}".xml + mkdir -p "$ALL_RESULT_DIR"/"${TEST_NAME}" + mv "$TEST_DIR"/logs/*log "$ALL_RESULT_DIR"/"${TEST_NAME}"/ +done + +rebot -N "smoketests" -d "$ALL_RESULT_DIR/" "$ALL_RESULT_DIR/*.xml" + diff --git a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh new file mode 100644 index 000000000000..2442cb9a70fe --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh @@ -0,0 +1,144 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +retry() { + n=0 + until [ $n -ge 100 ] + do + "$@" && break + n=$[$n+1] + echo "$n '$@' is failed..." 
+ sleep ${RETRY_SLEEP:-3} + done + if [ $n -eq 100 ]; then + return 255 + fi +} + +grep_log() { + CONTAINER="$1" + PATTERN="$2" + kubectl logs "$1" | grep "$PATTERN" +} + +wait_for_startup(){ + print_phase "Waiting until the k8s cluster is running" + retry all_pods_are_running + retry grep_log scm-0 "SCM exiting safe mode." + retry grep_log om-0 "HTTP server of ozoneManager listening" + print_phase "Cluster is up and running" +} + +all_pods_are_running() { + RUNNING_COUNT=$(kubectl get pod --field-selector status.phase=Running | wc -l) + ALL_COUNT=$(kubectl get pod | wc -l) + RUNNING_COUNT=$((RUNNING_COUNT - 1)) + ALL_COUNT=$((ALL_COUNT - 1)) + if [ "$RUNNING_COUNT" -lt "3" ]; then + echo "$RUNNING_COUNT pods are running. Waiting for more." + return 1 + elif [ "$RUNNING_COUNT" -ne "$ALL_COUNT" ]; then + echo "$RUNNING_COUNT pods are running out from the $ALL_COUNT" + return 2 + else + STARTED=true + return 0 + fi +} + +start_k8s_env() { + print_phase "Deleting existing k8s resources" + #reset environment + kubectl delete statefulset --all + kubectl delete daemonset --all + kubectl delete deployment --all + kubectl delete service --all + kubectl delete configmap --all + kubectl delete pod --all + kubectl delete pvc --all + kubectl delete pv --all + + print_phase "Applying k8s resources from $1" + kubectl apply -f . + wait_for_startup +} + +get_logs() { + mkdir -p logs + for pod in $(kubectl get pods -o custom-columns=NAME:.metadata.name | tail -n +2); do + kubectl logs "${pod}" > "logs/pod-${pod}.log" + done +} + +stop_k8s_env() { + if [ ! "$KEEP_RUNNING" ]; then + kubectl delete -f . + fi +} + +regenerate_resources() { + print_phase "Modifying Kubernetes resources file for test" + echo " (mounting current Ozone directory to the containers, scheduling containers to one node, ...)" + echo "" + echo "WARNING: this test can be executed only with local Kubernetes cluster" + echo " (source dir should be available from K8s nodes)" + echo "" + + PARENT_OF_PARENT=$(realpath ../..) + + if [ $(basename $PARENT_OF_PARENT) == "k8s" ]; then + #running from src dir + OZONE_ROOT=$(realpath ../../../../../target/ozone-0.6.0-SNAPSHOT) + else + #running from dist + OZONE_ROOT=$(realpath ../../..) 
+ fi + + flekszible generate -t mount:hostPath="$OZONE_ROOT",path=/opt/hadoop -t image:image=apache/ozone-runner:20200420-1 -t ozone/onenode +} + +revert_resources() { + print_phase "Regenerating original Kubernetes resource files" + flekszible generate +} + +execute_robot_test() { + print_phase "Executing robot tests $@" + mkdir -p result + + CONTAINER="$1" + shift 1 #Remove first argument which was the container name + + # shellcheck disable=SC2206 + ARGUMENTS=($@) + + kubectl exec -it "${CONTAINER}" -- bash -c 'rm -rf /tmp/report' + kubectl exec -it "${CONTAINER}" -- bash -c 'mkdir -p /tmp/report' + kubectl exec -it "${CONTAINER}" -- robot --nostatusrc -d /tmp/report ${ARGUMENTS[@]} || true + kubectl cp "${CONTAINER}":/tmp/report/output.xml "result/$CONTAINER-$RANDOM.xml" || true +} + +combine_reports() { + rm result/output.xml || true + rebot -d result --nostatusrc -o output.xml -N $(basename "$(pwd)") result/*.xml +} + +print_phase() { + echo "" + echo "**** $1 ****" + echo "" +} diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index 54c8fe2ce206..df63c6e7debb 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -424,7 +424,7 @@ bootstrap.min.js angular-route-1.7.9.min.js angular-nvd3-1.0.9.min.js angular-1.7.9.min.js -jquery-3.4.1.min.js +jquery-3.5.1.min.js -------------------------------------------------------------------------------- recon server uses a huge number of javascript and css dependencies. See the diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot new file mode 100644 index 000000000000..a28888b23f4b --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot @@ -0,0 +1,32 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
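The `retry` and `grep_log` helpers above are what `wait_for_startup` builds on: `retry` re-runs its arguments until they succeed (up to 100 attempts, sleeping `RETRY_SLEEP`, default 3, seconds between attempts), and `grep_log` greps a pod's logs for a pattern. A small usage sketch:

```bash
# Wait until the OM pod reports that its HTTP server is up, polling every
# 10 seconds instead of the default 3. Mirrors the calls in wait_for_startup.
source hadoop-ozone/dist/src/main/k8s/examples/testlib.sh

RETRY_SLEEP=10 retry grep_log om-0 "HTTP server of ozoneManager listening"
```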
+ +*** Settings *** +Documentation Test ozone admin command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Test Cases *** +Incomplete command + ${output} = Execute And Ignore Error ozone admin + Should contain ${output} Incomplete command + Should contain ${output} container + Should contain ${output} datanode + Should contain ${output} om + Should contain ${output} pipeline + Should contain ${output} replicationmanager + Should contain ${output} safemode + Should contain ${output} printTopology diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot new file mode 100644 index 000000000000..0560880de79d --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -0,0 +1,68 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test ozone admin container command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes +Suite Setup Create test data + +*** Variables *** +${CONTAINER} + +*** Keywords *** +Create test data + Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Execute ozone freon ockg -n1 -t1 -p container + +*** Test Cases *** +Create container + ${output} = Execute ozone admin container create + Should contain ${output} is created + ${container} = Execute echo "${output}" | grep 'is created' | cut -f2 -d' ' + Set Suite Variable ${CONTAINER} ${container} + +List containers + ${output} = Execute ozone admin container list + Should contain ${output} OPEN + +List containers with explicit host + ${output} = Execute ozone admin container list --scm scm + Should contain ${output} OPEN + +Container info + ${output} = Execute ozone admin container info "${CONTAINER}" + Should contain ${output} Container id: ${CONTAINER} + Should contain ${output} Datanodes + +Close container + Execute ozone admin container close "${CONTAINER}" + ${output} = Execute ozone admin container info "${CONTAINER}" + Should contain ${output} CLOS + +Incomplete command + ${output} = Execute And Ignore Error ozone admin container + Should contain ${output} Incomplete command + Should contain ${output} list + Should contain ${output} info + Should contain ${output} delete + Should contain ${output} create + Should contain ${output} close + +List containers on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose container list --scm unknown-host + Should contain ${output} Invalid host name + diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot index cb16bc8bc86a..b34f3af6255a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot +++ 
b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot @@ -14,17 +14,22 @@ # limitations under the License. *** Settings *** -Documentation Smoketest ozone cluster startup -Library OperatingSystem +Documentation Test ozone admin datanode command Library BuiltIn Resource ../commonlib.robot Test Timeout 5 minutes -*** Variables *** - - *** Test Cases *** -Run list datanodes +List datanodes ${output} = Execute ozone admin datanode list Should contain ${output} Datanode: - Should contain ${output} Related pipelines: \ No newline at end of file + Should contain ${output} Related pipelines: + +Incomplete command + ${output} = Execute And Ignore Error ozone admin datanode + Should contain ${output} Incomplete command + Should contain ${output} list + +List datanodes on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose datanode list --scm unknown-host + Should contain ${output} Invalid host name diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot index b514ae7b07ad..3a97f8394977 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot @@ -14,21 +14,52 @@ # limitations under the License. *** Settings *** -Documentation Smoketest ozone cluster startup -Library OperatingSystem +Documentation Test ozone admin pipeline command Library BuiltIn Resource ../commonlib.robot Test Timeout 5 minutes *** Variables *** - +${PIPELINE} *** Test Cases *** -Run list pipeline +Create pipeline + ${output} = Execute ozone admin pipeline create + Should contain ${output} is created. Factor: ONE, Type: STAND_ALONE + ${pipeline} = Execute echo "${output}" | grep 'is created' | cut -f1 -d' ' | cut -f2 -d'=' + Set Suite Variable ${PIPELINE} ${pipeline} + +List pipelines ${output} = Execute ozone admin pipeline list - Should contain ${output} Type: - Should contain ${output} Factor:ONE, State: + Should contain ${output} Factor:ONE -Run create pipeline - ${output} = Execute ozone admin pipeline create - Should contain ${output} is created. 
Factor: ONE, Type: STAND_ALONE \ No newline at end of file +List pipelines with explicit host + ${output} = Execute ozone admin pipeline list --scm scm + Should contain ${output} Factor:ONE + +Deactivate pipeline + Execute ozone admin pipeline deactivate "${PIPELINE}" + ${output} = Execute ozone admin pipeline list | grep "${PIPELINE}" + Should contain ${output} DORMANT + +Activate pipeline + Execute ozone admin pipeline activate "${PIPELINE}" + ${output} = Execute ozone admin pipeline list | grep "${PIPELINE}" + Should contain ${output} OPEN + +Close pipeline + Execute ozone admin pipeline close "${PIPELINE}" + ${output} = Execute ozone admin pipeline list | grep "${PIPELINE}" + Should contain ${output} CLOSED + +Incomplete command + ${output} = Execute And Ignore Error ozone admin pipeline + Should contain ${output} Incomplete command + Should contain ${output} close + Should contain ${output} create + Should contain ${output} deactivate + Should contain ${output} list + +List pipelines on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose pipeline list --scm unknown-host + Should contain ${output} Invalid host name diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot new file mode 100644 index 000000000000..cef294f1e8d7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test ozone admin replicationmanager command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Test Cases *** +Check replicationmanager + ${output} = Execute ozone admin replicationmanager status + Should contain ${output} ReplicationManager + Should contain ${output} Running + +Check replicationmanager with explicit host + ${output} = Execute ozone admin replicationmanager status --scm scm + Should contain ${output} ReplicationManager + Should contain ${output} Running + +Start replicationmanager + ${output} = Execute ozone admin replicationmanager start + Should contain ${output} Starting ReplicationManager + Wait Until Keyword Succeeds 30sec 5sec Execute ozone admin replicationmanager status | grep -q 'is Running' + +Stop replicationmanager + ${output} = Execute ozone admin replicationmanager stop + Should contain ${output} Stopping ReplicationManager + Wait Until Keyword Succeeds 30sec 5sec Execute ozone admin replicationmanager status | grep -q 'is Not Running' + +Incomplete command + ${output} = Execute And Ignore Error ozone admin replicationmanager + Should contain ${output} Incomplete command + Should contain ${output} start + Should contain ${output} stop + Should contain ${output} status + +Check replicationmanager on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose replicationmanager status --scm unknown-host + Should contain ${output} Invalid host name + diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot new file mode 100644 index 000000000000..114d846e0e07 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test ozone admin safemode command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Test Cases *** +Check safemode + ${output} = Execute ozone admin safemode status + Should contain ${output} SCM is out of safe mode + +Check safemode with explicit host + ${output} = Execute ozone admin safemode status --scm scm + Should contain ${output} SCM is out of safe mode + +Wait for safemode exit + ${output} = Execute ozone admin safemode wait -t 2 + Should contain ${output} SCM is out of safe mode + +Incomplete command + ${output} = Execute And Ignore Error ozone admin safemode + Should contain ${output} Incomplete command + Should contain ${output} status + Should contain ${output} exit + Should contain ${output} wait + +Check safemode on unknown host + ${output} = Execute And Ignore Error ozone admin --verbose safemode status --scm unknown-host + Should contain ${output} Invalid host name + diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/links.robot b/hadoop-ozone/dist/src/main/smoketest/basic/links.robot new file mode 100644 index 000000000000..71c046e18a25 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/basic/links.robot @@ -0,0 +1,152 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test bucket links via Ozone CLI +Library OperatingSystem +Resource ../commonlib.robot +Resource ../ozone-lib/shell.robot +Test Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab +Test Timeout 2 minute +Suite Setup Create volumes + +*** Variables *** +${prefix} generated + +*** Keywords *** +Create volumes + ${random} = Generate Random String 5 [NUMBERS] + Set Suite Variable ${source} ${random}-source + Set Suite Variable ${target} ${random}-target + Execute ozone sh volume create ${source} + Execute ozone sh volume create ${target} + Run Keyword if '${SECURITY_ENABLED}' == 'true' Setup ACL tests + +Setup ACL tests + Execute ozone sh bucket create ${source}/readable-bucket + Execute ozone sh key put ${source}/readable-bucket/key-in-readable-bucket /etc/passwd + Execute ozone sh bucket create ${source}/unreadable-bucket + Execute ozone sh bucket link ${source}/readable-bucket ${target}/readable-link + Execute ozone sh bucket link ${source}/readable-bucket ${target}/unreadable-link + Execute ozone sh bucket link ${source}/unreadable-bucket ${target}/link-to-unreadable-bucket + Execute ozone sh volume addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target} + Execute ozone sh volume addacl --acl user:testuser2/scm@EXAMPLE.COM:rl ${source} + Execute ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:rl ${source}/readable-bucket + Execute ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target}/readable-link + Execute ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target}/link-to-unreadable-bucket + +Can follow link with read access + Execute kdestroy + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute And Ignore Error ozone sh key list ${target}/readable-link + Should Contain ${result} key-in-readable-bucket + +Cannot follow link without read access + Execute kdestroy + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute And Ignore Error ozone sh key list ${target}/unreadable-link + Should Contain ${result} PERMISSION_DENIED + +ACL verified on source bucket + Execute kdestroy + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute ozone sh bucket info ${target}/link-to-unreadable-bucket + Should Contain ${result} link-to-unreadable-bucket + Should Not Contain ${result} PERMISSION_DENIED + ${result} = Execute And Ignore Error ozone sh key list ${target}/link-to-unreadable-bucket + Should Contain ${result} PERMISSION_DENIED + +*** Test Cases *** +Link to non-existent bucket + Execute ozone sh bucket link ${source}/no-such-bucket ${target}/dangling-link + ${result} = Execute And Ignore Error ozone sh key list ${target}/dangling-link + Should Contain ${result} BUCKET_NOT_FOUND + +Key create passthrough + Execute ozone sh bucket link ${source}/bucket1 ${target}/link1 + Execute ozone sh bucket create ${source}/bucket1 + Execute ozone sh key put ${target}/link1/key1 /etc/passwd + Key Should Match Local File ${target}/link1/key1 /etc/passwd + +Key read passthrough + Execute ozone sh key put ${source}/bucket1/key2 /opt/hadoop/NOTICE.txt + Key Should Match Local File ${source}/bucket1/key2 /opt/hadoop/NOTICE.txt + +Key list passthrough + ${target_list} = Execute ozone sh key list ${target}/link1 | jq -r '.name' + ${source_list} = Execute ozone sh key list ${source}/bucket1 | jq -r '.name' + Should Be Equal ${target_list} ${source_list} + Should Contain ${source_list} key1 + Should Contain ${source_list} key2 + +Key 
delete passthrough + Execute ozone sh key delete ${target}/link1/key2 + ${source_list} = Execute ozone sh key list ${source}/bucket1 | jq -r '.name' + Should Not Contain ${source_list} key2 + +Bucket list contains links + ${result} = Execute ozone sh bucket list ${target} + Should Contain ${result} link1 + Should Contain ${result} dangling-link + +Bucket info shows source + ${result} = Execute ozone sh bucket info ${target}/link1 | jq -r '.sourceVolume, .sourceBucket' | xargs + Should Be Equal ${result} ${source} bucket1 + +Source and target have separate ACLs + Execute ozone sh bucket addacl --acl user:user1:rwxy ${target}/link1 + Verify ACL bucket ${target}/link1 USER user1 READ WRITE READ_ACL WRITE_ACL + Verify ACL bucket ${source}/bucket1 USER user1 ${EMPTY} + + Execute ozone sh bucket addacl --acl group:group2:r ${source}/bucket1 + Verify ACL bucket ${target}/link1 GROUP group2 ${EMPTY} + Verify ACL bucket ${source}/bucket1 GROUP group2 READ + +Buckets and links share namespace + Execute ozone sh bucket link ${source}/bucket2 ${target}/link2 + ${result} = Execute And Ignore Error ozone sh bucket create ${target}/link2 + Should Contain ${result} BUCKET_ALREADY_EXISTS + + Execute ozone sh bucket create ${target}/bucket3 + ${result} = Execute And Ignore Error ozone sh bucket link ${source}/bucket1 ${target}/bucket3 + Should Contain ${result} BUCKET_ALREADY_EXISTS + +Can follow link with read access + Run Keyword if '${SECURITY_ENABLED}' == 'true' Can follow link with read access + +Cannot follow link without read access + Run Keyword if '${SECURITY_ENABLED}' == 'true' Cannot follow link without read access + +ACL verified on source bucket + Run Keyword if '${SECURITY_ENABLED}' == 'true' ACL verified on source bucket + +Loop in link chain is detected + Execute ozone sh bucket link ${target}/loop1 ${target}/loop2 + Execute ozone sh bucket link ${target}/loop2 ${target}/loop3 + Execute ozone sh bucket link ${target}/loop3 ${target}/loop1 + ${result} = Execute And Ignore Error ozone sh key list ${target}/loop2 + Should Contain ${result} DETECTED_LOOP + +Multiple links to same bucket are allowed + Execute ozone sh bucket link ${source}/bucket1 ${target}/link3 + Execute ozone sh key put ${target}/link3/key3 /etc/group + Key Should Match Local File ${target}/link1/key3 /etc/group + +Source bucket not affected by deleting link + Execute ozone sh bucket delete ${target}/link1 + ${bucket_list} = Execute ozone sh bucket list ${target} + Should Not Contain ${bucket_list} link1 + ${source_list} = Execute ozone sh key list ${source}/bucket1 | jq -r '.name' + Should Contain ${source_list} key1 diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot new file mode 100644 index 000000000000..ba0006153d29 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test ozone shell CLI usage +Library OperatingSystem +Resource ../commonlib.robot + +*** Variables *** +${prefix} generated + +*** Keywords *** + +Generate prefix + ${random} = Generate Random String 5 [NUMBERS] + Set Suite Variable ${prefix} ${random} + +Test ozone shell + [arguments] ${protocol} ${server} ${volume} + ${result} = Execute And Ignore Error ozone sh volume info ${protocol}${server}/${volume} + Should contain ${result} VOLUME_NOT_FOUND + ${result} = Execute ozone sh volume create ${protocol}${server}/${volume} --quota 100TB + Should not contain ${result} Failed + ${result} = Execute ozone sh volume list ${protocol}${server}/ | jq -r '. | select(.name=="${volume}")' + Should contain ${result} creationTime + ${result} = Execute ozone sh volume list | jq -r '. | select(.name=="${volume}")' + Should contain ${result} creationTime +# TODO: Disable updating the owner, acls should be used to give access to other user. + Execute ozone sh volume update ${protocol}${server}/${volume} --quota 10TB +# ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | jq -r '. | select(.volumeName=="${volume}") | .owner | .name' +# Should Be Equal ${result} bill + ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | jq -r '. | select(.name=="${volume}") | .quota' + Should Be Equal ${result} 10995116277760 + Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 + ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="bb1") | .storageType' + Should Be Equal ${result} DISK + ${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | jq -r '. | select(.name=="bb1") | .volumeName' + Should Be Equal ${result} ${volume} + Run Keyword Test key handling ${protocol} ${server} ${volume} + Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1 + Execute ozone sh volume delete ${protocol}${server}/${volume} + +Test Volume Acls + [arguments] ${protocol} ${server} ${volume} + Execute ozone sh volume create ${protocol}${server}/${volume} + ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh volume addacl ${protocol}${server}/${volume} -a user:superuser1:rwxy[DEFAULT] + ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . + ${result} = Execute ozone sh volume removeacl ${protocol}${server}/${volume} -a user:superuser1:xy + ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . 
+ ${result} = Execute ozone sh volume setacl ${protocol}${server}/${volume} -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT] + ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" . + +Test Bucket Acls + [arguments] ${protocol} ${server} ${volume} + Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 + ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh bucket addacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:rwxy + ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + ${result} = Execute ozone sh bucket removeacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:xy + ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\" + ${result} = Execute ozone sh bucket setacl ${protocol}${server}/${volume}/bb1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT] + ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" . + + +Test key handling + [arguments] ${protocol} ${server} ${volume} + Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key1 /opt/hadoop/NOTICE.txt + Execute rm -f /tmp/NOTICE.txt.1 + Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1 /tmp/NOTICE.txt.1 + Execute diff -q /opt/hadoop/NOTICE.txt /tmp/NOTICE.txt.1 + + Execute ozone sh key put -t RATIS ${protocol}${server}/${volume}/bb1/key1_RATIS /opt/hadoop/NOTICE.txt + Execute rm -f /tmp/key1_RATIS + Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1_RATIS /tmp/key1_RATIS + Execute diff -q /opt/hadoop/NOTICE.txt /tmp/key1_RATIS + ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1_RATIS | jq -r '. 
| select(.name=="key1_RATIS")' + Should contain ${result} RATIS + Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key1_RATIS + + Execute ozone sh key cp ${protocol}${server}/${volume}/bb1 key1 key1-copy + Execute rm -f /tmp/key1-copy + Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1-copy /tmp/key1-copy + Execute diff -q /opt/hadoop/NOTICE.txt /tmp/key1-copy + Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key1-copy + + ${result} = Execute And Ignore Error ozone sh key get ${protocol}${server}/${volume}/bb1/key1 /tmp/NOTICE.txt.1 + Should Contain ${result} NOTICE.txt.1 exists + ${result} = Execute ozone sh key get --force ${protocol}${server}/${volume}/bb1/key1 /tmp/NOTICE.txt.1 + Should Not Contain ${result} NOTICE.txt.1 exists + ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | jq -r '. | select(.name=="key1")' + Should contain ${result} creationTime + ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="key1") | .name' + Should Be Equal ${result} key1 + Execute ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2 + ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | jq -r '.name' + Should Be Equal ${result} key2 + Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key2 + +Test key Acls + [arguments] ${protocol} ${server} ${volume} + Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key2 /opt/hadoop/NOTICE.txt + ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . + ${result} = Execute ozone sh key addacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:rwxy + ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + ${result} = Execute ozone sh key removeacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:xy + ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\" + ${result} = Execute ozone sh key setacl ${protocol}${server}/${volume}/bb1/key2 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc + ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 + Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" + Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-single.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-single.robot new file mode 100644 index 000000000000..e08ee0933f98 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-single.robot @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test ozone shell CLI usage +Library OperatingSystem +Resource ../commonlib.robot +Resource ozone-shell-lib.robot +Test Timeout 2 minute +Suite Setup Generate prefix + +*** Test Cases *** + +Test ozone shell + Test ozone shell ${EMPTY} ${EMPTY} ${prefix}-rpcbasic diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot index 9143f38dcf36..45b2d35a6cee 100644 --- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot +++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot @@ -17,18 +17,11 @@ Documentation Test ozone shell CLI usage Library OperatingSystem Resource ../commonlib.robot +Resource ozone-shell-lib.robot Test Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab Test Timeout 2 minute Suite Setup Generate prefix -*** Variables *** -${prefix} generated - -*** Keywords *** -Generate prefix - ${random} = Generate Random String 5 [NUMBERS] - Set Suite Variable ${prefix} ${random} - *** Test Cases *** RpcClient with port Test ozone shell o3:// om:9862 ${prefix}-rpcwoport @@ -47,113 +40,3 @@ RpcClient without host RpcClient without scheme Test ozone shell ${EMPTY} ${EMPTY} ${prefix}-rpcwoscheme - - -*** Keywords *** -Test ozone shell - [arguments] ${protocol} ${server} ${volume} - ${result} = Execute And Ignore Error ozone sh volume info ${protocol}${server}/${volume} - Should contain ${result} VOLUME_NOT_FOUND - ${result} = Execute ozone sh volume create ${protocol}${server}/${volume} --quota 100TB - Should not contain ${result} Failed - ${result} = Execute ozone sh volume list ${protocol}${server}/ | jq -r '. | select(.name=="${volume}")' - Should contain ${result} creationTime - ${result} = Execute ozone sh volume list | jq -r '. | select(.name=="${volume}")' - Should contain ${result} creationTime -# TODO: Disable updating the owner, acls should be used to give access to other user. - Execute ozone sh volume update ${protocol}${server}/${volume} --quota 10TB -# ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | jq -r '. | select(.volumeName=="${volume}") | .owner | .name' -# Should Be Equal ${result} bill - ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | jq -r '. | select(.name=="${volume}") | .quota' - Should Be Equal ${result} 10995116277760 - Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | jq -r '. 
| select(.name=="bb1") | .volumeName' - Should Be Equal ${result} ${volume} - Run Keyword Test key handling ${protocol} ${server} ${volume} - Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1 - Execute ozone sh volume delete ${protocol}${server}/${volume} - -Test Volume Acls - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh volume create ${protocol}${server}/${volume} - ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh volume addacl ${protocol}${server}/${volume} -a user:superuser1:rwxy[DEFAULT] - ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . - ${result} = Execute ozone sh volume removeacl ${protocol}${server}/${volume} -a user:superuser1:xy - ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . - ${result} = Execute ozone sh volume setacl ${protocol}${server}/${volume} -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT] - ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" . - -Test Bucket Acls - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 - ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh bucket addacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:rwxy - ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - ${result} = Execute ozone sh bucket removeacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:xy - ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\" - ${result} = Execute ozone sh bucket setacl ${protocol}${server}/${volume}/bb1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT] - ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . 
\"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" . - - -Test key handling - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key1 /opt/hadoop/NOTICE.txt - Execute rm -f /tmp/NOTICE.txt.1 - Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1 /tmp/NOTICE.txt.1 - Execute diff -q /opt/hadoop/NOTICE.txt /tmp/NOTICE.txt.1 - - Execute ozone sh key put -t RATIS ${protocol}${server}/${volume}/bb1/key1_RATIS /opt/hadoop/NOTICE.txt - Execute rm -f /tmp/key1_RATIS - Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1_RATIS /tmp/key1_RATIS - Execute diff -q /opt/hadoop/NOTICE.txt /tmp/key1_RATIS - ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1_RATIS | jq -r '. | select(.name=="key1_RATIS")' - Should contain ${result} RATIS - Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key1_RATIS - - Execute ozone sh key cp ${protocol}${server}/${volume}/bb1 key1 key1-copy - Execute rm -f /tmp/key1-copy - Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1-copy /tmp/key1-copy - Execute diff -q /opt/hadoop/NOTICE.txt /tmp/key1-copy - Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key1-copy - - ${result} = Execute And Ignore Error ozone sh key get ${protocol}${server}/${volume}/bb1/key1 /tmp/NOTICE.txt.1 - Should Contain ${result} NOTICE.txt.1 exists - ${result} = Execute ozone sh key get --force ${protocol}${server}/${volume}/bb1/key1 /tmp/NOTICE.txt.1 - Should Not Contain ${result} NOTICE.txt.1 exists - ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | jq -r '. | select(.name=="key1")' - Should contain ${result} creationTime - ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="key1") | .name' - Should Be Equal ${result} key1 - Execute ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2 - ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | jq -r '.name' - Should Be Equal ${result} key2 - Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key2 - -Test key Acls - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key2 /opt/hadoop/NOTICE.txt - ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh key addacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:rwxy - ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - ${result} = Execute ozone sh key removeacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:xy - ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . 
\"READ\", \"WRITE\" - ${result} = Execute ozone sh key setacl ${protocol}${server}/${volume}/bb1/key2 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc - ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot index 407111a8030c..bf3b3e92d708 100644 --- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot @@ -18,44 +18,14 @@ Library OperatingSystem Library String Library BuiltIn +Resource lib/os.robot + *** Variables *** ${SECURITY_ENABLED} false ${OM_HA_PARAM} ${EMPTY} ${OM_SERVICE_ID} om *** Keywords *** -Execute - [arguments] ${command} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - Should Be Equal As Integers ${rc} 0 - [return] ${output} - -Execute And Ignore Error - [arguments] ${command} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - [return] ${output} - -Execute and checkrc - [arguments] ${command} ${expected_error_code} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - Should Be Equal As Integers ${rc} ${expected_error_code} - [return] ${output} - -Compare files - [arguments] ${file1} ${file2} - ${checksumbefore} = Execute md5sum ${file1} | awk '{print $1}' - ${checksumafter} = Execute md5sum ${file2} | awk '{print $1}' - Should Be Equal ${checksumbefore} ${checksumafter} - -Install aws cli - ${rc} ${output} = Run And Return Rc And Output which apt-get - Run Keyword if '${rc}' == '0' Install aws cli s3 debian - ${rc} ${output} = Run And Return Rc And Output yum --help - Run Keyword if '${rc}' == '0' Install aws cli s3 centos - Kinit HTTP user ${hostname} = Execute hostname Wait Until Keyword Succeeds 2min 10sec Execute kinit -k HTTP/${hostname}@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot index 39e561af6cf0..f7e3274fcedd 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot @@ -29,8 +29,8 @@ Write key *** Test Cases *** Test ozone debug - ${result} = Execute ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.[]' + ${result} = Execute ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.KeyLocations[0][0].Locations' Should contain ${result} files - ${result} = Execute ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.[].files[0]' + ${result} = Execute ozone debug chunkinfo o3://om/vol1/bucket1/debugKey | jq -r '.KeyLocations[0][0].Locations.files[0]' File Should Exist ${result} diff --git a/hadoop-ozone/dist/src/main/smoketest/failing/test1.robot b/hadoop-ozone/dist/src/main/smoketest/failing/test1.robot new file mode 100644 index 000000000000..c1602496c8eb --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/failing/test1.robot @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation This test always fails + +*** Test Cases *** +Failure 1 + Fail diff --git a/hadoop-ozone/dist/src/main/smoketest/failing/test2.robot b/hadoop-ozone/dist/src/main/smoketest/failing/test2.robot new file mode 100644 index 000000000000..d161ffe4bf6d --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/failing/test2.robot @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation This test always fails + +*** Test Cases *** +Failure 2 + Fail diff --git a/hadoop-ozone/dist/src/main/smoketest/lib/os.robot b/hadoop-ozone/dist/src/main/smoketest/lib/os.robot new file mode 100644 index 000000000000..af927f9af7c0 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/lib/os.robot @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Library OperatingSystem + +*** Keywords *** +Execute + [arguments] ${command} + Run Keyword And Return Execute and checkrc ${command} 0 + +Execute And Ignore Error + [arguments] ${command} + ${rc} ${output} = Run And Return Rc And Output ${command} + Log ${output} + [return] ${output} + +Execute and checkrc + [arguments] ${command} ${expected_error_code} + ${rc} ${output} = Run And Return Rc And Output ${command} + Log ${output} + Should Be Equal As Integers ${rc} ${expected_error_code} + [return] ${output} + +Compare files + [arguments] ${file1} ${file2} + ${checksumbefore} = Execute md5sum ${file1} | awk '{print $1}' + ${checksumafter} = Execute md5sum ${file2} | awk '{print $1}' + Should Be Equal ${checksumbefore} ${checksumafter} + +Create Random File + ${postfix} = Generate Random String 5 [NUMBERS] + ${tmpfile} = Set Variable /tmp/tempfile-${postfix} + File Should Not Exist ${tmpfile} + ${content} = Set Variable "Random string" + Create File ${tmpfile} ${content} + [Return] ${tmpfile} diff --git a/hadoop-ozone/dist/src/main/smoketest/lib/os_tests.robot b/hadoop-ozone/dist/src/main/smoketest/lib/os_tests.robot new file mode 100644 index 000000000000..dd4beaf3c161 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/lib/os_tests.robot @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Resource os.robot + + +*** Test Cases *** + +Execute + ${output} = Execute echo 42 + Should Be Equal ${output} 42 + +Execute failing command + Run Keyword And Expect Error * Execute false + +Execute And Ignore Error + ${output} = Execute And Ignore Error echo 123 && false + Should Be Equal ${output} 123 + +Execute and checkrc + ${output} = Execute and checkrc echo failure && exit 1 1 + Should Be Equal ${output} failure + +Execute and checkrc RC mismatch + Run Keyword And Expect Error * Execute and checkrc echo failure && exit 3 1 diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot new file mode 100644 index 000000000000..9afc6df2eabb --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Resource ../lib/os.robot +Library String + + +*** Keywords *** +Bucket Exists + [arguments] ${bucket} + ${rc} ${output} = Run And Return Rc And Output timeout 15 ozone sh bucket info ${bucket} + Return From Keyword If ${rc} != 0 ${FALSE} + Return From Keyword If 'VOLUME_NOT_FOUND' in '''${output}''' ${FALSE} + Return From Keyword If 'BUCKET_NOT_FOUND' in '''${output}''' ${FALSE} + [Return] ${TRUE} + +Compare Key With Local File + [arguments] ${key} ${file} + ${postfix} = Generate Random String 5 [NUMBERS] + ${tmpfile} = Set Variable /tmp/tempkey-${postfix} + Execute ozone sh key get -f ${key} ${tmpfile} + ${rc} = Run And Return Rc diff -q ${file} ${tmpfile} + Execute rm -f ${tmpfile} + ${result} = Set Variable If ${rc} == 0 ${TRUE} ${FALSE} + [Return] ${result} + +Key Should Match Local File + [arguments] ${key} ${file} + ${matches} = Compare Key With Local File ${key} ${file} + Should Be True ${matches} + +Verify ACL + [arguments] ${object_type} ${object} ${type} ${name} ${acls} + ${actual_acls} = Execute ozone sh ${object_type} getacl ${object} | jq -r '.[] | select(.type == "${type}") | select(.name == "${name}") | .aclList[]' | xargs + Should Be Equal ${acls} ${actual_acls} + +Create Random Volume + ${random} = Generate Random String 5 [LOWER] + Execute ozone sh volume create o3://${OM_SERVICE_ID}/vol-${random} + [return] vol-${random} diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot new file mode 100644 index 000000000000..56fbcf8b61f0 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot @@ -0,0 +1,58 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Resource ../lib/os.robot +Resource shell.robot + + +*** Variables *** +${OM_SERVICE_ID} om + + +*** Test Cases *** + +Bucket Exists should not if No Such Volume + ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/no-such-volume/any-bucket + Should Be Equal ${exists} ${FALSE} + +Bucket Exists should not if No Such Bucket + Execute And Ignore Error ozone sh volume create o3://${OM_SERVICE_ID}/vol1 + ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/vol1/no-such-bucket + Should Be Equal ${exists} ${FALSE} + +Bucket Exists + Execute And Ignore Error ozone sh bucket create o3://${OM_SERVICE_ID}/vol1/bucket + ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/vol1/bucket + Should Be Equal ${exists} ${TRUE} + +Bucket Exists should not if No Such OM service + ${exists} = Bucket Exists o3://no-such-host/any-volume/any-bucket + Should Be Equal ${exists} ${FALSE} + + +Key Should Match Local File + [Setup] Execute ozone sh key put o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd + Key Should Match Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd + +Compare Key With Local File with Different File + ${random_file} = Create Random File + ${matches} = Compare Key With Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd ${random_file} + Should Be Equal ${matches} ${FALSE} + [Teardown] Remove File ${random_file} + +Compare Key With Local File if File Does Not Exist + ${matches} = Compare Key With Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd /no-such-file + Should Be Equal ${matches} ${FALSE} diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot index 6d0042b30496..450f1b6d9efc 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot @@ -19,7 +19,7 @@ Library OperatingSystem Resource ../commonlib.robot Resource setup.robot Test Timeout 5 minutes -Suite Setup Setup ${BUCKET_TYPE}s for FS test +Suite Setup Setup for FS test *** Test Cases *** List root diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot index 16e059ede721..441822d7fb3b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot @@ -29,12 +29,12 @@ ${BUCKET_IN_VOL2} ${BUCKET_TYPE}3-${SCHEME} ${DEEP_DIR} test/${SCHEME}/dir *** Keywords *** -Setup buckets for FS test +Setup for FS test Create volumes for FS test - Create buckets for FS test + Run Keyword Create ${BUCKET_TYPE}s for FS test Sanity check for FS test Assign suite vars for FS test - Log Completed setup for ${SCHEME} tests in ${VOLUME}/${BUCKET} using FS base URL: ${BASE_URL} + Log Completed setup for ${SCHEME} tests with ${BUCKET_TYPE}s in ${VOLUME}/${BUCKET} using FS base URL: ${BASE_URL} Create volumes for FS test Execute And Ignore Error ozone sh volume create ${VOLUME} --quota 100TB @@ -45,6 +45,16 @@ Create buckets for FS test Execute ozone sh bucket create ${VOLUME}/${BUCKET2} Execute ozone sh bucket create ${VOL2}/${BUCKET_IN_VOL2} +Create links for FS test + Execute And Ignore Error ozone sh volume create ${VOLUME}-src --quota 100TB + Execute And Ignore Error ozone sh volume create ${VOL2}-src --quota 100TB + Execute ozone sh bucket create ${VOLUME}-src/${BUCKET}-src + Execute ozone sh bucket create ${VOLUME}-src/${BUCKET2}-src + Execute ozone sh bucket create ${VOL2}-src/${BUCKET_IN_VOL2}-src + Execute ozone sh bucket link 
${VOLUME}-src/${BUCKET}-src ${VOLUME}/${BUCKET} + Execute ozone sh bucket link ${VOLUME}-src/${BUCKET2}-src ${VOLUME}/${BUCKET2} + Execute ozone sh bucket link ${VOL2}-src/${BUCKET_IN_VOL2}-src ${VOL2}/${BUCKET_IN_VOL2} + Sanity check for FS test ${result} = Execute ozone sh volume list Should contain ${result} ${VOLUME} diff --git a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot index 707384902b75..ada2dd8daf6a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot +++ b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot @@ -28,10 +28,10 @@ ${API_ENDPOINT_URL} http://recon:9888/api/v1 *** Keywords *** Check if Recon picks up container from OM Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/containers + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/containers Should contain ${result} \"ContainerID\":1 - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/utilization/fileCount + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/utilization/fileCount Should contain ${result} \"fileSize\":2048,\"count\":10 *** Test Cases *** @@ -43,13 +43,13 @@ Check if Recon picks up OM data Wait Until Keyword Succeeds 90sec 10sec Check if Recon picks up container from OM Check if Recon picks up DN heartbeats - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/datanodes + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/datanodes Should contain ${result} datanodes Should contain ${result} datanode_1 Should contain ${result} datanode_2 Should contain ${result} datanode_3 - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/pipelines + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/pipelines Should contain ${result} pipelines Should contain ${result} RATIS Should contain ${result} OPEN @@ -57,15 +57,15 @@ Check if Recon picks up DN heartbeats Should contain ${result} datanode_2 Should contain ${result} datanode_3 - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/clusterState + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/clusterState Should contain ${result} \"totalDatanodes\":3 Should contain ${result} \"healthyDatanodes\":3 Should contain ${result} \"pipelines\":4 - ${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/containers/1/replicaHistory + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/containers/1/replicaHistory Should contain ${result} \"containerId\":1 Check if Recon Web UI is up Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL} - Should contain ${result} Ozone Recon \ No newline at end of file + ${result} = Execute curl --negotiate -u : -LSs ${ENDPOINT_URL} + Should contain ${result} Ozone Recon diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index 004a49645918..1c6827a16560 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -88,7 +88,7 @@ Test Multipart Upload Complete #read file and check the key ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key multipartKey1 /tmp/multipartKey1.result - Execute cat /tmp/part1 /tmp/part2 >> /tmp/multipartKey1 + Execute cat 
/tmp/part1 /tmp/part2 > /tmp/multipartKey1 Compare files /tmp/multipartKey1 /tmp/multipartKey1.result Test Multipart Upload Complete Entity too small @@ -156,7 +156,7 @@ Test Multipart Upload Complete Invalid part errors and complete mpu with few par Should contain ${result} ETag ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key multipartKey3 /tmp/multipartKey3.result - Execute cat /tmp/part1 /tmp/part3 >> /tmp/multipartKey3 + Execute cat /tmp/part1 /tmp/part3 > /tmp/multipartKey3 Compare files /tmp/multipartKey3 /tmp/multipartKey3.result Test abort Multipart upload @@ -237,7 +237,6 @@ Test Multipart Upload Put With Copy Should contain ${result} UploadId ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copytest/source - Should contain ${result} ${BUCKET} Should contain ${result} ETag Should contain ${result} LastModified ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 @@ -260,13 +259,11 @@ Test Multipart Upload Put With Copy and range Should contain ${result} UploadId ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=0-10485758 - Should contain ${result} ${BUCKET} Should contain ${result} ETag Should contain ${result} LastModified ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 2 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=10485758-10485760 - Should contain ${result} ${BUCKET} Should contain ${result} ETag Should contain ${result} LastModified ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot index bcba30db94e3..ce7b8254b0d0 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot @@ -23,14 +23,20 @@ Test Timeout 5 minutes Suite Setup Setup s3 tests *** Variables *** -${ENDPOINT_URL} http://s3g:9878 ${BUCKET} generated +${ENDPOINT_URL} http://s3g:9878 + +*** Keywords *** +Create bucket to be deleted + ${bucket} = Run Keyword if '${BUCKET}' == 'link' Create link to-be-deleted + ... ELSE Run Keyword Create bucket + [return] ${bucket} *** Test Cases *** Delete existing bucket -# Bucket already is created in Test Setup. 
- Execute AWSS3APICli delete-bucket --bucket ${BUCKET} + ${bucket} = Create bucket to be deleted + Execute AWSS3APICli delete-bucket --bucket ${bucket} Delete non-existent bucket ${result} = Execute AWSS3APICli and checkrc delete-bucket --bucket nosuchbucket 255 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot b/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot index 76668716cdba..f3ecd011ceb3 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot @@ -31,5 +31,6 @@ ${BUCKET} generated Head Bucket not existent ${result} = Execute AWSS3APICli head-bucket --bucket ${BUCKET} ${result} = Execute AWSS3APICli and checkrc head-bucket --bucket ozonenosuchbucketqqweqwe 255 - Should contain ${result} Bad Request - Should contain ${result} 400 + Should contain ${result} 404 + Should contain ${result} Not Found + diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index 4595587c91af..74dba38657d6 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -15,12 +15,13 @@ *** Settings *** Resource ../commonlib.robot -Resource ../commonlib.robot +Resource ../ozone-lib/shell.robot *** Variables *** +${ENDPOINT_URL} http://s3g:9878 ${OZONE_S3_HEADER_VERSION} v4 ${OZONE_S3_SET_CREDENTIALS} true -${BUCKET} bucket-999 +${BUCKET} generated *** Keywords *** Execute AWSS3APICli @@ -38,6 +39,12 @@ Execute AWSS3Cli ${output} = Execute aws s3 --endpoint-url ${ENDPOINT_URL} ${command} [return] ${output} +Install aws cli + ${rc} ${output} = Run And Return Rc And Output which apt-get + Run Keyword if '${rc}' == '0' Install aws cli s3 debian + ${rc} ${output} = Run And Return Rc And Output yum --help + Run Keyword if '${rc}' == '0' Install aws cli s3 centos + Install aws cli s3 centos Execute sudo -E yum install -y awscli @@ -73,8 +80,9 @@ Setup dummy credentials for S3 Create bucket ${postfix} = Generate Random String 5 [NUMBERS] - Set Suite Variable ${BUCKET} bucket-${postfix} - Create bucket with name ${BUCKET} + ${bucket} = Set Variable bucket-${postfix} + Create bucket with name ${bucket} + [Return] ${bucket} Create bucket with name [Arguments] ${bucket} @@ -85,6 +93,19 @@ Create bucket with name Setup s3 tests Run Keyword Install aws cli Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup v4 headers - ${result} = Execute And Ignore Error ozone sh volume create o3://${OM_SERVICE_ID}/s3v - Should not contain ${result} Failed - Run Keyword if '${BUCKET}' == 'generated' Create bucket + ${BUCKET} = Run Keyword if '${BUCKET}' == 'generated' Create bucket + ... 
ELSE Set Variable ${BUCKET} + Set Suite Variable ${BUCKET} + Run Keyword if '${BUCKET}' == 'link' Setup links for S3 tests + +Setup links for S3 tests + ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/link + Return From Keyword If ${exists} + Execute ozone sh volume create o3://${OM_SERVICE_ID}/legacy + Execute ozone sh bucket create o3://${OM_SERVICE_ID}/legacy/source-bucket + Create link link + +Create link + [arguments] ${bucket} + Execute ozone sh bucket link o3://${OM_SERVICE_ID}/legacy/source-bucket o3://${OM_SERVICE_ID}/s3v/${bucket} + [return] ${bucket} diff --git a/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot b/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot new file mode 100644 index 000000000000..a78f94e5fa97 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test for bucket encryption +Library BuiltIn +Library String +Resource ../commonlib.robot +Resource ../lib/os.robot +Resource ../ozone-lib/shell.robot +Suite Setup Setup Test +Test Timeout 5 minutes + +*** Variables *** +${KEY_NAME} key1 +${VOLUME} + +*** Keywords *** +Setup Test + ${volume} = Create Random Volume + Set Suite Variable ${VOLUME} ${volume} + + +*** Test Cases *** +Create Encrypted Bucket + ${output} = Execute ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket + Should Not Contain ${output} INVALID_REQUEST + Bucket Exists o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket + +Create Key in Encrypted Bucket + ${key} = Set Variable o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket/passwd + ${output} = Execute ozone sh key put ${key} /etc/passwd + Key Should Match Local File ${key} /etc/passwd diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot index 5103e80279c2..70bade5f1e85 100644 --- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot +++ b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot @@ -24,6 +24,8 @@ Test Timeout 5 minutes *** Variables *** ${ENDPOINT_URL} http://s3g:9878 +${TEMPDIR} /tmp +${TEST_FILE} NOTICE.txt *** Keywords *** Setup volume names @@ -38,6 +40,19 @@ Secure S3 test Success ${output} = Execute aws s3api --endpoint-url ${ENDPOINT_URL} list-buckets Should contain ${output} bucket-test123 +Secure S3 put-object test + ${testFilePath} = Set Variable ${TEMPDIR}/${TEST_FILE} + Copy File ${TEST_FILE} ${testFilePath} + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} put-object --bucket=bucket-test123 --key=tmp1/tmp2/NOTICE.txt --body=${testFilePath} + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} 
list-objects --bucket=bucket-test123 + Should contain ${output} tmp1/tmp2/NOTICE.txt + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} put-object --bucket=bucket-test123 --key=tmp3//tmp4/NOTICE.txt --body=${testFilePath} + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} list-objects --bucket=bucket-test123 + Should contain ${output} tmp3//tmp4/NOTICE.txt + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} put-object --bucket=bucket-test123 --key=//tmp5/tmp6/NOTICE.txt --body=${testFilePath} + ${output} = Execute aws s3api --endpoint ${ENDPOINT_URL} list-objects --bucket=bucket-test123 + Should contain ${output} //tmp5/tmp6/NOTICE.txt + Secure S3 test Failure Run Keyword Setup dummy credentials for S3 ${rc} ${result} = Run And Return Rc And Output aws s3api --endpoint-url ${ENDPOINT_URL} create-bucket --bucket bucket-test123 diff --git a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot index 9c4156fcd6e4..065e390e5b84 100644 --- a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot +++ b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot @@ -30,6 +30,11 @@ ${OM_SERVICE_LIST_URL} http://om:9874/serviceList ${SCM_URL} http://scm:9876 ${RECON_URL} http://recon:9888 +${SCM_CONF_URL} http://scm:9876/conf +${SCM_JMX_URL} http://scm:9876/jmx +${SCM_STACKS_URL} http://scm:9876/stacks + + *** Keywords *** Verify SPNEGO enabled URL [arguments] ${url} @@ -60,6 +65,15 @@ Test OM Service List Test SCM portal Verify SPNEGO enabled URL ${SCM_URL} +Test SCM conf + Verify SPNEGO enabled URL ${SCM_CONF_URL} + +Test SCM jmx + Verify SPNEGO enabled URL ${SCM_JMX_URL} + +Test SCM stacks + Verify SPNEGO enabled URL ${SCM_STACKS_URL} + Test Recon portal Verify SPNEGO enabled URL ${RECON_URL} diff --git a/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh b/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh index 55304916ad1f..1d8096b4baae 100755 --- a/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh +++ b/hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh @@ -16,7 +16,7 @@ # limitations under the License. -# Run a Hadoop command on all slave hosts. +# Run a Hadoop command on all worker hosts. 
function hadoop_usage { diff --git a/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh b/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh index b46045b2d8c0..769af336cac8 100755 --- a/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh +++ b/hadoop-ozone/dist/src/shell/hdds/hadoop-functions.sh @@ -999,7 +999,7 @@ function hadoop_connect_to_hosts # shellcheck disable=SC2124 local params="$@" local worker_file - local tmpslvnames + local tmp_worker_names # # ssh (or whatever) to a host @@ -1030,10 +1030,10 @@ function hadoop_connect_to_hosts else # no spaces allowed in the pdsh arg host list # shellcheck disable=SC2086 - tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,) + tmp_worker_names=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,) PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \ -f "${HADOOP_SSH_PARALLEL}" \ - -w "${tmpslvnames}" $"${@// /\\ }" 2>&1 + -w "${tmp_worker_names}" $"${@// /\\ }" 2>&1 fi else if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 42e8dcaf28e1..c536484e9b56 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -54,7 +54,6 @@ function hadoop_usage hadoop_add_subcommand "insight" client "tool to get runtime operation information" hadoop_add_subcommand "version" client "print the version" hadoop_add_subcommand "dtutil" client "operations related to delegation tokens" - hadoop_add_subcommand "upgrade" client "HDFS to Ozone in-place upgrade tool" hadoop_add_subcommand "admin" client "Ozone admin tool" hadoop_add_subcommand "debug" client "Ozone debug tool" @@ -214,12 +213,8 @@ function ozonecmd_case HADOOP_CLASSNAME=org.apache.hadoop.security.token.DtUtilShell OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" ;; - upgrade) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.upgrade.InPlaceUpgrade - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-upgrade" - ;; admin) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.admin.OzoneAdmin + HADOOP_CLASSNAME=org.apache.hadoop.hdds.cli.OzoneAdmin OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" ;; debug) diff --git a/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh b/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh new file mode 100755 index 000000000000..65739787ee67 --- /dev/null +++ b/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +: "${SCM_DIR:="${OZONE_VOLUME}/scm"}" +: "${OZONE_RUNNER_VERSION:="20200625-1"}" + +docker run --rm -v "${SCM_DIR}":/scm -v "${SCRIPT_DIR}/1.0.0":/upgrade -w /scm/metadata apache/ozone-runner:"${OZONE_RUNNER_VERSION}" /upgrade/01-migrate-scm-db.sh diff --git a/hadoop-ozone/dist/src/shell/upgrade/1.0.0/01-migrate-scm-db.sh b/hadoop-ozone/dist/src/shell/upgrade/1.0.0/01-migrate-scm-db.sh new file mode 100755 index 000000000000..dee676841972 --- /dev/null +++ b/hadoop-ozone/dist/src/shell/upgrade/1.0.0/01-migrate-scm-db.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo Running upgrade script for HDDS-3499 + +ldb --db=scm.db create_column_family containers +ldb --db=scm.db create_column_family pipelines + +ldb --db=scm-container.db --key_hex --value_hex dump | ldb --db=scm.db --key_hex --value_hex --column_family=containers load +ldb --db=scm-pipeline.db --key_hex --value_hex dump | ldb --db=scm.db --key_hex --value_hex --column_family=pipelines load diff --git a/hadoop-ozone/dist/src/test/shell/compose_testlib.bats b/hadoop-ozone/dist/src/test/shell/compose_testlib.bats new file mode 100644 index 000000000000..058da64f30d7 --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/compose_testlib.bats @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +load ../../main/compose/testlib.sh +@test "Find test recursive, only on one level" { + cd $BATS_TEST_DIRNAME + run find_tests + [[ "$output" == "./test1/test.sh ./test2/test.sh ./test4/test.sh" ]] +} + +@test "Find test by suite" { + OZONE_ACCEPTANCE_SUITE=one + cd $BATS_TEST_DIRNAME + run find_tests + [[ "$output" == "./test4/test.sh" ]] +} + +@test "Find test default suite" { + OZONE_ACCEPTANCE_SUITE=misc + cd $BATS_TEST_DIRNAME + run find_tests + [[ "$output" == "./test1/test.sh ./test2/test.sh" ]] +} diff --git a/hadoop-ozone/dist/src/test/shell/gc_opts.bats b/hadoop-ozone/dist/src/test/shell/gc_opts.bats index 1400a4058f78..feb29af0e35c 100644 --- a/hadoop-ozone/dist/src/test/shell/gc_opts.bats +++ b/hadoop-ozone/dist/src/test/shell/gc_opts.bats @@ -14,14 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. - - # # Can be executed with bats (https://github.com/bats-core/bats-core) -# bats gc_opts.bats (FROM THE CURRENT DIRECTORY) +# bats gc_opts.bats # -source ../../shell/hdds/hadoop-functions.sh +load ../../shell/hdds/hadoop-functions.sh @test "Setting Hadoop GC parameters: add GC params for server" { export HADOOP_SUBCMD_SUPPORTDAEMONIZATION=true export HADOOP_OPTS="Test" diff --git a/hadoop-ozone/dist/src/test/shell/k8s_testlib.bats b/hadoop-ozone/dist/src/test/shell/k8s_testlib.bats new file mode 100644 index 000000000000..4558a1e114c4 --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/k8s_testlib.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export COUNTER=1 + +pass_after_three_failures() { + if [ $COUNTER -eq 3 ]; then + return 0 + fi + COUNTER=$(( COUNTER + 1)) + return 255 +} + +pass_first() { + echo "pass" +} + +pass_never() { + return 255 +} + +load ../../main/k8s/examples/testlib.sh + +@test "Test retry with passing function" { + retry pass_first +} + +@test "Test retry with 3 failures" { + export RETRY_SLEEP=0 + retry pass_after_three_failures +} + +@test "Test retry always failure" { + export RETRY_SLEEP=0 + run retry pass_never + [ "$status" -eq 255 ] +} + + + + + diff --git a/hadoop-ozone/dist/src/test/shell/test1/test.sh b/hadoop-ozone/dist/src/test/shell/test1/test.sh new file mode 100644 index 000000000000..b13ca90d239f --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/test1/test.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/test/shell/test2/test.sh b/hadoop-ozone/dist/src/test/shell/test2/test.sh new file mode 100644 index 000000000000..8dbf5b29721c --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/test2/test.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:misc \ No newline at end of file diff --git a/hadoop-ozone/dist/src/test/shell/test3/subtest1/test.sh b/hadoop-ozone/dist/src/test/shell/test3/subtest1/test.sh new file mode 100644 index 000000000000..8dbf5b29721c --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/test3/subtest1/test.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:misc \ No newline at end of file diff --git a/hadoop-ozone/dist/src/test/shell/test4/test.sh b/hadoop-ozone/dist/src/test/shell/test4/test.sh new file mode 100644 index 000000000000..accc445711e5 --- /dev/null +++ b/hadoop-ozone/dist/src/test/shell/test4/test.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:one \ No newline at end of file diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index 941a9cf354c4..5523150b8585 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -20,9 +20,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-ozone-fault-injection-test org.apache.hadoop - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Mini Ozone Chaos Tests Apache Hadoop Ozone Mini Ozone Chaos Tests diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 2a5cf24603e3..3267976f7670 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -225,7 +225,8 @@ protected void initializeConfiguration() throws IOException { conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY, + OzoneConfigKeys + .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, 4); conf.setInt( OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. 
org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml index 49c509853cdf..7fe3790b08fc 100644 --- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/network-tests/pom.xml @@ -20,7 +20,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone-fault-injection-test - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-network-tests Apache Hadoop Ozone Network Tests diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 99b396784a1b..bf933a3b355f 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-fault-injection-test - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Fault Injection Tests Apache Hadoop Ozone Fault Injection Tests pom diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index f40175099922..090aecad341c 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-insight - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Insight Tool Apache Hadoop Ozone Insight Tool jar diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java index 690783ee411b..b4080796be2a 100644 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java +++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java @@ -34,6 +34,10 @@ mixinStandardHelpOptions = true) public class Insight extends GenericCli { + public Insight() { + super(Insight.class); + } + public static void main(String[] args) throws Exception { new Insight().run(args); } diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 622a4077da0c..ebfe1c0057c8 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-integration-test - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Integration Tests Apache Hadoop Ozone Integration Tests jar diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java new file mode 100644 index 000000000000..c4e543554a6a --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -0,0 +1,231 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; +import static org.junit.Assert.fail; + +/** + * Class tests create with object store and getFileStatus. + */ +public class TestOzoneFSWithObjectStoreCreate { + + @Rule + public Timeout timeout = new Timeout(300000); + + private String rootPath; + + private MiniOzoneCluster cluster = null; + + private OzoneFileSystem o3fs; + + private String volumeName; + + private String bucketName; + + + @Before + public void init() throws Exception { + volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); + bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); + + OzoneConfiguration conf = new OzoneConfiguration(); + + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .build(); + cluster.waitForClusterToBeReady(); + + // create a volume and a bucket to be used by OzoneFileSystem + OzoneBucket bucket = + TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); + + rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName, + volumeName); + o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath), conf); + } + + + @Test + public void test() throws Exception { + + OzoneVolume ozoneVolume = + cluster.getRpcClient().getObjectStore().getVolume(volumeName); + + OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); + + String key1 = "///dir1/dir2/file1"; + String key2 = "///dir1/dir2/file2"; + int length = 10; + OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key1, length); + byte[] b = new byte[10]; + Arrays.fill(b, (byte)96); + ozoneOutputStream.write(b); + ozoneOutputStream.close(); + + ozoneOutputStream = ozoneBucket.createKey(key2, length); + ozoneOutputStream.write(b); + ozoneOutputStream.close(); + + // Adding "/" here otherwise Path will be considered as relative path and + // workingDir will be added. 
+ key1 = "///dir1/dir2/file1"; + Path p = new Path(key1); + Assert.assertTrue(o3fs.getFileStatus(p).isFile()); + + p = p.getParent(); + checkAncestors(p); + + + key2 = "///dir1/dir2/file2"; + p = new Path(key2); + Assert.assertTrue(o3fs.getFileStatus(p).isFile()); + checkAncestors(p); + + } + + + @Test + public void testObjectStoreCreateWithO3fs() throws Exception { + OzoneVolume ozoneVolume = + cluster.getRpcClient().getObjectStore().getVolume(volumeName); + + OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); + + + // Use ObjectStore API to create keys. This simulates how S3 creates keys. + String parentDir = "/dir1/dir2/dir3/dir4/"; + + + List keys = new ArrayList<>(); + keys.add("/dir1"); + keys.add("/dir1/dir2"); + keys.add("/dir1/dir2/dir3"); + keys.add("/dir1/dir2/dir3/dir4/"); + for (int i=1; i <= 3; i++) { + int length = 10; + String fileName = parentDir.concat("/file" + i + "/"); + keys.add(fileName); + OzoneOutputStream ozoneOutputStream = + ozoneBucket.createKey(fileName, length); + byte[] b = new byte[10]; + Arrays.fill(b, (byte)96); + ozoneOutputStream.write(b); + ozoneOutputStream.close(); + } + + // check + for (int i=1; i <= 3; i++) { + String fileName = parentDir.concat("/file" + i + "/"); + Path p = new Path(fileName); + Assert.assertTrue(o3fs.getFileStatus(p).isFile()); + checkAncestors(p); + } + + // Delete keys with object store api delete + for (int i = 1; i <= 3; i++) { + String fileName = parentDir.concat("/file" + i + "/"); + ozoneBucket.deleteKey(fileName); + } + + + // Delete parent dir via o3fs. + boolean result = o3fs.delete(new Path("/dir1"), true); + Assert.assertTrue(result); + + // No Key should exist. + for(String key : keys) { + checkPath(new Path(key)); + } + + + for (int i=1; i <= 3; i++) { + int length = 10; + String fileName = parentDir.concat("/file" + i + "/"); + OzoneOutputStream ozoneOutputStream = + ozoneBucket.createKey(fileName, length); + byte[] b = new byte[10]; + Arrays.fill(b, (byte)96); + ozoneOutputStream.write(b); + ozoneOutputStream.close(); + } + + o3fs.mkdirs(new Path("/dest")); + o3fs.rename(new Path("/dir1"), new Path("/dest")); + + // No source Key should exist. + for(String key : keys) { + checkPath(new Path(key)); + } + + // check dest path. 
+ for (int i=1; i <= 3; i++) { + String fileName = "/dest/".concat(parentDir.concat("/file" + i + "/")); + Path p = new Path(fileName); + Assert.assertTrue(o3fs.getFileStatus(p).isFile()); + checkAncestors(p); + } + + } + + private void checkPath(Path path) { + try { + o3fs.getFileStatus(path); + fail("testObjectStoreCreateWithO3fs failed for Path" + path); + } catch (IOException ex) { + Assert.assertTrue(ex instanceof FileNotFoundException); + Assert.assertTrue(ex.getMessage().contains("No such file or directory")); + } + } + + private void checkAncestors(Path p) throws Exception { + p = p.getParent(); + while(p.getParent() != null) { + FileStatus fileStatus = o3fs.getFileStatus(p); + Assert.assertTrue(fileStatus.isDirectory()); + p = p.getParent(); + } + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 15a8fc65c5fc..2b8803edc41e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -44,6 +44,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -97,7 +98,8 @@ public class TestOzoneFileInterfaces { */ @Parameters public static Collection data() { - return Arrays.asList(new Object[][] {{false, true}, {true, false}}); + return Arrays.asList(new Object[][] {{false, true, true}, + {true, false, false}}); } private boolean setDefaultFs; @@ -118,10 +120,13 @@ public static Collection data() { private OMMetrics omMetrics; + private boolean enableFileSystemPaths; + public TestOzoneFileInterfaces(boolean setDefaultFs, - boolean useAbsolutePath) { + boolean useAbsolutePath, boolean enabledFileSystemPaths) { this.setDefaultFs = setDefaultFs; this.useAbsolutePath = useAbsolutePath; + this.enableFileSystemPaths = enabledFileSystemPaths; GlobalStorageStatistics.INSTANCE.reset(); } @@ -131,6 +136,8 @@ public void init() throws Exception { bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + enableFileSystemPaths); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .build(); @@ -350,15 +357,18 @@ public void testListStatus() throws IOException { String dirPath = RandomStringUtils.randomAlphanumeric(5); Path path = createPath("/" + dirPath); paths.add(path); + + long mkdirs = statistics.getLong( + StorageStatistics.CommonStatisticNames.OP_MKDIRS); assertTrue("Makedirs returned with false for the path " + path, fs.mkdirs(path)); + assertCounter(++mkdirs, StorageStatistics.CommonStatisticNames.OP_MKDIRS); long listObjects = statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()); long omListStatus = omMetrics.getNumListStatus(); FileStatus[] statusList = fs.listStatus(createPath("/")); assertEquals(1, statusList.length); - assertEquals(++listObjects, - statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue()); + assertCounter(++listObjects, Statistic.OBJECTS_LIST.getSymbol()); assertEquals(++omListStatus, 
omMetrics.getNumListStatus()); assertEquals(fs.getFileStatus(path), statusList[0]); @@ -367,11 +377,11 @@ public void testListStatus() throws IOException { paths.add(path); assertTrue("Makedirs returned with false for the path " + path, fs.mkdirs(path)); + assertCounter(++mkdirs, StorageStatistics.CommonStatisticNames.OP_MKDIRS); statusList = fs.listStatus(createPath("/")); assertEquals(2, statusList.length); - assertEquals(++listObjects, - statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue()); + assertCounter(++listObjects, Statistic.OBJECTS_LIST.getSymbol()); assertEquals(++omListStatus, omMetrics.getNumListStatus()); for (Path p : paths) { assertTrue(Arrays.asList(statusList).contains(fs.getFileStatus(p))); @@ -521,4 +531,8 @@ private FileStatus getDirectoryStat(Path path) throws IOException { return status; } + + private void assertCounter(long value, String key) { + assertEquals(value, statistics.getLong(key).longValue()); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index 75107d0c5f22..4e728f7475ef 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -18,8 +18,10 @@ package org.apache.hadoop.fs.ozone; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Set; @@ -32,7 +34,9 @@ import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -40,6 +44,7 @@ import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.security.UserGroupInformation; @@ -47,6 +52,7 @@ import org.apache.commons.io.IOUtils; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -63,33 +69,49 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Ozone file system tests that are not covered by contract tests. + * + * Note: When adding new test(s), please append it in testFileSystem() to + * avoid test run time regression. 
*/ +@RunWith(Parameterized.class) public class TestOzoneFileSystem { + @Parameterized.Parameters + public static Collection data() { + return Arrays.asList(new Object[]{true}, new Object[]{false}); + } + + public TestOzoneFileSystem(boolean setDefaultFs) { + this.enabledFileSystemPaths = setDefaultFs; + } /** - * Set a timeout for each test. - */ + * Set a timeout for each test. + */ @Rule public Timeout timeout = new Timeout(300000); private static final Logger LOG = LoggerFactory.getLogger(TestOzoneFileSystem.class); + private boolean enabledFileSystemPaths; + private MiniOzoneCluster cluster; private FileSystem fs; private OzoneFileSystem o3fs; private String volumeName; private String bucketName; private int rootItemCount; + private Trash trash; - @Test(timeout = 300_000) public void testCreateFileShouldCheckExistenceOfDirWithSameName() - throws Exception { + throws Exception { /* * Op 1. create file -> /d1/d2/d3/d4/key2 * Op 2. create dir -> /d1/d2/d3/d4/key2 @@ -100,7 +122,6 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() * * Op 3. create file -> /d1/d2/d3 (d3 as a file inside /d1/d2) */ - setupOzoneFileSystem(); Path parent = new Path("/d1/d2/d3/d4/"); Path file1 = new Path(parent, "key1"); @@ -134,6 +155,9 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() } catch (FileAlreadyExistsException fae) { // ignore as its expected } + + // Cleanup + fs.delete(new Path("/d1/"), true); } /** @@ -141,14 +165,11 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() * directories. Has roughly the semantics of Unix @{code mkdir -p}. * {@link FileSystem#mkdirs(Path)} */ - @Test(timeout = 300_000) public void testMakeDirsWithAnExistingDirectoryPath() throws Exception { /* * Op 1. create file -> /d1/d2/d3/d4/k1 (d3 is a sub-dir inside /d1/d2) * Op 2. 
create dir -> /d1/d2 */ - setupOzoneFileSystem(); - Path parent = new Path("/d1/d2/d3/d4/"); Path file1 = new Path(parent, "key1"); try (FSDataOutputStream outputStream = fs.create(file1, false)) { @@ -158,6 +179,31 @@ public void testMakeDirsWithAnExistingDirectoryPath() throws Exception { Path subdir = new Path("/d1/d2/"); boolean status = fs.mkdirs(subdir); assertTrue("Shouldn't send error if dir exists", status); + // Cleanup + fs.delete(new Path("/d1"), true); + } + + public void testCreateWithInvalidPaths() throws Exception { + Path parent = new Path("../../../../../d1/d2/"); + Path file1 = new Path(parent, "key1"); + checkInvalidPath(file1); + + file1 = new Path("/:/:"); + checkInvalidPath(file1); + } + + private void checkInvalidPath(Path path) throws Exception { + FSDataOutputStream outputStream = null; + try { + outputStream = fs.create(path, false); + fail("testCreateWithInvalidPaths failed for path" + path); + } catch (Exception ex) { + Assert.assertTrue(ex instanceof InvalidPathException); + } finally { + if (outputStream != null) { + outputStream.close(); + } + } } @Test(timeout = 300_000) @@ -167,6 +213,13 @@ public void testFileSystem() throws Exception { testOzoneFsServiceLoader(); o3fs = (OzoneFileSystem) fs; + testCreateFileShouldCheckExistenceOfDirWithSameName(); + testMakeDirsWithAnExistingDirectoryPath(); + testCreateWithInvalidPaths(); + testListStatusWithIntermediateDir(); + + testRenameToTrashDisabled(); + testGetTrashRoots(); testGetTrashRoot(); testGetDirectoryModificationTime(); @@ -184,6 +237,8 @@ public void testFileSystem() throws Exception { testRenameDir(); testSeekOnFileLength(); testDeleteRoot(); + + testRecursiveDelete(); } @After @@ -195,11 +250,14 @@ public void tearDown() { } private void setupOzoneFileSystem() - throws IOException, TimeoutException, InterruptedException { + throws IOException, TimeoutException, InterruptedException { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(FS_TRASH_INTERVAL_KEY, 1); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + enabledFileSystemPaths); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); + .setNumDatanodes(3) + .build(); cluster.waitForClusterToBeReady(); // create a volume and a bucket to be used by OzoneFileSystem OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); @@ -207,14 +265,15 @@ private void setupOzoneFileSystem() bucketName = bucket.getName(); String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), - bucket.getVolumeName()); + OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), + bucket.getVolumeName()); // Set the fs.defaultFS and start the filesystem conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); // Set the number of keys to be processed during batch operate. 
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); fs = FileSystem.get(conf); + trash = new Trash(conf); } private void testOzoneFsServiceLoader() throws IOException { @@ -263,7 +322,7 @@ private void testDeleteCreatesFakeParentDir() throws Exception { } // Delete the child key - fs.delete(child, false); + fs.delete(child, true); // Deleting the only child should create the parent dir key if it does // not exist @@ -272,6 +331,78 @@ private void testDeleteCreatesFakeParentDir() throws Exception { assertEquals(parentKey, parentKeyInfo.getName()); } + + private void testRecursiveDelete() throws Exception { + Path grandparent = new Path("/gdir1"); + + for (int i = 1; i <= 10; i++) { + Path parent = new Path(grandparent, "pdir" +i); + Path child = new Path(parent, "child"); + ContractTestUtils.touch(fs, child); + } + + // Delete the grandparent, which should delete all keys. + fs.delete(grandparent, true); + + checkPath(grandparent); + + for (int i = 1; i <= 10; i++) { + Path parent = new Path(grandparent, "dir" +i); + Path child = new Path(parent, "child"); + checkPath(parent); + checkPath(child); + } + + + Path level0 = new Path("/level0"); + + for (int i = 1; i <= 3; i++) { + Path level1 = new Path(level0, "level" +i); + Path level2 = new Path(level1, "level" +i); + Path level1File = new Path(level1, "file1"); + Path level2File = new Path(level2, "file1"); + ContractTestUtils.touch(fs, level1File); + ContractTestUtils.touch(fs, level2File); + } + + // Delete at sub directory level. + for (int i = 1; i <= 3; i++) { + Path level1 = new Path(level0, "level" +i); + Path level2 = new Path(level1, "level" +i); + fs.delete(level2, true); + fs.delete(level1, true); + } + + + // Delete level0 finally. + fs.delete(grandparent, true); + + // Check if it exists or not. + checkPath(grandparent); + + for (int i = 1; i <= 3; i++) { + Path level1 = new Path(level0, "level" +i); + Path level2 = new Path(level1, "level" +i); + Path level1File = new Path(level1, "file1"); + Path level2File = new Path(level2, "file1"); + checkPath(level1); + checkPath(level2); + checkPath(level1File); + checkPath(level2File); + } + + } + + private void checkPath(Path path) { + try { + fs.getFileStatus(path); + fail("testRecursiveDelete failed"); + } catch (IOException ex) { + Assert.assertTrue(ex instanceof FileNotFoundException); + Assert.assertTrue(ex.getMessage().contains("No such file or directory")); + } + } + private void testFileDelete() throws Exception { Path grandparent = new Path("/testBatchDelete"); Path parent = new Path(grandparent, "parent"); @@ -334,9 +465,7 @@ private void testListStatus() throws Exception { 3, fileStatuses.length); } - @Test public void testListStatusWithIntermediateDir() throws Exception { - setupOzoneFileSystem(); String keyName = "object-dir/object-name"; OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -617,4 +746,35 @@ public void testGetTrashRoots() throws IOException { // Clean up o3fs.delete(trashRoot, true); } + + /** + * Check that no files are actually moved to trash since it is disabled by + * fs.rename(src, dst, options). + */ + public void testRenameToTrashDisabled() throws IOException { + // Create a file + String testKeyName = "testKey1"; + Path path = new Path(OZONE_URI_DELIMITER, testKeyName); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + + // Call moveToTrash. 
We can't call protected fs.rename() directly + trash.moveToTrash(path); + + // Construct paths + String username = UserGroupInformation.getCurrentUser().getShortUserName(); + Path trashRoot = new Path(OZONE_URI_DELIMITER, TRASH_PREFIX); + Path userTrash = new Path(trashRoot, username); + Path userTrashCurrent = new Path(userTrash, "Current"); + Path trashPath = new Path(userTrashCurrent, testKeyName); + + // Trash Current directory should still have been created. + Assert.assertTrue(o3fs.exists(userTrashCurrent)); + // Check under trash, the key should be deleted instead + Assert.assertFalse(o3fs.exists(trashPath)); + + // Cleanup + o3fs.delete(trashRoot, true); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index 8c71c61f389d..b812b4530d33 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -39,21 +40,25 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; -import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Iterator; @@ -64,48 +69,66 @@ import java.util.TreeSet; import java.util.stream.Collectors; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; /** * Ozone file system tests that are not covered by contract tests. 
- * TODO: Refactor this and TestOzoneFileSystem later to reduce code duplication. + * TODO: Refactor this and TestOzoneFileSystem to reduce duplication. */ +@RunWith(Parameterized.class) public class TestRootedOzoneFileSystem { + @Parameterized.Parameters + public static Collection data() { + return Arrays.asList(new Object[]{true}, new Object[]{false}); + } + + public TestRootedOzoneFileSystem(boolean setDefaultFs) { + enabledFileSystemPaths = setDefaultFs; + } + @Rule public Timeout globalTimeout = new Timeout(300_000); - private OzoneConfiguration conf; - private MiniOzoneCluster cluster = null; - private FileSystem fs; - private RootedOzoneFileSystem ofs; - private ObjectStore objectStore; + private static boolean enabledFileSystemPaths; + + private static OzoneConfiguration conf; + private static MiniOzoneCluster cluster = null; + private static FileSystem fs; + private static RootedOzoneFileSystem ofs; + private static ObjectStore objectStore; private static BasicRootedOzoneClientAdapterImpl adapter; + private static Trash trash; - private String volumeName; - private Path volumePath; - private String bucketName; + private static String volumeName; + private static Path volumePath; + private static String bucketName; // Store path commonly used by tests that test functionality within a bucket - private Path bucketPath; - private String rootPath; + private static Path bucketPath; + private static String rootPath; - @Before - public void init() throws Exception { + @BeforeClass + public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setInt(FS_TRASH_INTERVAL_KEY, 1); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + enabledFileSystemPaths); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .build(); cluster.waitForClusterToBeReady(); objectStore = cluster.getClient().getObjectStore(); - - String username = UserGroupInformation.getCurrentUser().getUserName(); - + // create a volume and a bucket to be used by RootedOzoneFileSystem (OFS) OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); volumeName = bucket.getVolumeName(); @@ -118,16 +141,17 @@ public void init() throws Exception { // Set the fs.defaultFS and start the filesystem conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - // Note: FileSystem#loadFileSystems won't load OFS class due to META-INF - // hence this workaround. - conf.set("fs.ofs.impl", "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"); + // Set the number of keys to be processed during batch operate. + conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); + // fs.ofs.impl would be loaded from META-INF, no need to manually set it fs = FileSystem.get(conf); + trash = new Trash(conf); ofs = (RootedOzoneFileSystem) fs; adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter(); } - @After - public void teardown() { + @AfterClass + public static void teardown() { if (cluster != null) { cluster.shutdown(); } @@ -137,10 +161,7 @@ public void teardown() { @Test public void testOzoneFsServiceLoader() throws IOException { OzoneConfiguration confTestLoader = new OzoneConfiguration(); - // Note: FileSystem#loadFileSystems won't load OFS class due to META-INF - // hence this workaround. 
- confTestLoader.set("fs.ofs.impl", - "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"); + // fs.ofs.impl should be loaded from META-INF, no need to explicitly set it Assert.assertEquals(FileSystem.getFileSystemClass( OzoneConsts.OZONE_OFS_URI_SCHEME, confTestLoader), RootedOzoneFileSystem.class); @@ -172,6 +193,9 @@ public void testCreateDoesNotAddParentDirKeys() throws Exception { Assert.assertTrue( "Parent directory does not appear to be a directory", fs.getFileStatus(parent).isDirectory()); + + // Cleanup + fs.delete(grandparent, true); } @Test @@ -238,6 +262,9 @@ public void testListStatus() throws Exception { Assert.assertEquals( "FileStatus did not return all children of the directory", 3, fileStatuses.length); + + // Cleanup + fs.delete(parent, true); } /** @@ -302,6 +329,12 @@ public void testMkdirOnNonExistentVolumeBucketDir() throws Exception { Assert.assertEquals(fileStatuses.length, 0); fileStatuses = ofs.listStatus(dir2); Assert.assertEquals(fileStatuses.length, 0); + + // Cleanup + fs.delete(dir2, true); + fs.delete(dir1, true); + ozoneVolume.deleteBucket(bucketNameLocal); + objectStore.deleteVolume(volumeNameLocal); } /** @@ -329,6 +362,10 @@ public void testMkdirNonExistentVolumeBucket() throws Exception { Assert.assertEquals(bucketNameLocal, ozoneBucket.getName()); // TODO: Use listStatus to check volume and bucket creation in HDDS-2928. + + // Cleanup + ozoneVolume.deleteBucket(bucketNameLocal); + objectStore.deleteVolume(volumeNameLocal); } /** @@ -348,6 +385,9 @@ public void testMkdirNonExistentVolume() throws Exception { Assert.assertEquals(volumeNameLocal, ozoneVolume.getName()); // TODO: Use listStatus to check volume and bucket creation in HDDS-2928. + + // Cleanup + objectStore.deleteVolume(volumeNameLocal); } /** @@ -413,6 +453,12 @@ public void testListStatusOnLargeDirectory() throws Exception { for (int i=0; i < numDirs; i++) { Assert.assertTrue(paths.contains(fileStatuses[i].getPath().getName())); } + + // Cleanup + for(int i = 0; i < numDirs; i++) { + Path p = new Path(root, String.valueOf(i)); + fs.delete(p, true); + } } /** @@ -453,6 +499,10 @@ public void testListStatusOnSubDirs() throws Exception { fileStatus1.equals(dir12.toString())); Assert.assertTrue(fileStatus2.equals(dir11.toString()) || fileStatus2.equals(dir12.toString())); + + // Cleanup + fs.delete(dir2, true); + fs.delete(dir1, true); } @Test @@ -478,6 +528,10 @@ public void testNonExplicitlyCreatedPathExistsAfterItsLeafsWereRemoved() FileStatus fileStatus = fs.getFileStatus(interimPath); Assert.assertEquals("FileStatus does not point to interimPath", interimPath.getName(), fileStatus.getPath().getName()); + + // Cleanup + fs.delete(target, true); + fs.delete(source, true); } /** @@ -505,6 +559,10 @@ public void testRenameToDifferentBucket() throws IOException { } catch (IOException ignored) { // Test passed. Exception thrown as expected. 
} + + // Cleanup + fs.delete(target, true); + fs.delete(source, true); } private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory) @@ -532,8 +590,8 @@ private void assertKeyNotFoundException(IOException ex) { private Path createRandomVolumeBucketWithDirs() throws IOException { String volume1 = getRandomNonExistVolumeName(); String bucket1 = "bucket-" + RandomStringUtils.randomNumeric(5); - Path bucketPath1 = new Path( - OZONE_URI_DELIMITER + volume1 + OZONE_URI_DELIMITER + bucket1); + Path bucketPath1 = new Path(OZONE_URI_DELIMITER + volume1 + + OZONE_URI_DELIMITER + bucket1); Path dir1 = new Path(bucketPath1, "dir1"); fs.mkdirs(dir1); // Intentionally creating this "in-the-middle" dir key @@ -550,13 +608,23 @@ private Path createRandomVolumeBucketWithDirs() throws IOException { return bucketPath1; } + private void teardownVolumeBucketWithDir(Path bucketPath1) + throws IOException { + fs.delete(new Path(bucketPath1, "dir1"), true); + fs.delete(new Path(bucketPath1, "dir2"), true); + OFSPath ofsPath = new OFSPath(bucketPath1); + OzoneVolume volume = objectStore.getVolume(ofsPath.getVolumeName()); + volume.deleteBucket(ofsPath.getBucketName()); + objectStore.deleteVolume(ofsPath.getVolumeName()); + } + /** * OFS: Test non-recursive listStatus on root and volume. */ @Test public void testListStatusRootAndVolumeNonRecursive() throws Exception { Path bucketPath1 = createRandomVolumeBucketWithDirs(); - createRandomVolumeBucketWithDirs(); + Path bucketPath2 = createRandomVolumeBucketWithDirs(); // listStatus("/volume/bucket") FileStatus[] fileStatusBucket = ofs.listStatus(bucketPath1); Assert.assertEquals(2, fileStatusBucket.length); @@ -568,7 +636,11 @@ public void testListStatusRootAndVolumeNonRecursive() throws Exception { // listStatus("/") Path root = new Path(OZONE_URI_DELIMITER); FileStatus[] fileStatusRoot = ofs.listStatus(root); - Assert.assertEquals(2, fileStatusRoot.length); + // Default volume "s3v" is created by OM during start up. + Assert.assertEquals(2 + 1, fileStatusRoot.length); + // Cleanup + teardownVolumeBucketWithDir(bucketPath2); + teardownVolumeBucketWithDir(bucketPath1); } /** @@ -631,7 +703,7 @@ private void listStatusCheckHelper(Path path) throws IOException { @Test public void testListStatusRootAndVolumeRecursive() throws IOException { Path bucketPath1 = createRandomVolumeBucketWithDirs(); - createRandomVolumeBucketWithDirs(); + Path bucketPath2 = createRandomVolumeBucketWithDirs(); // listStatus("/volume/bucket") listStatusCheckHelper(bucketPath1); // listStatus("/volume") @@ -641,6 +713,9 @@ public void testListStatusRootAndVolumeRecursive() throws IOException { // listStatus("/") Path root = new Path(OZONE_URI_DELIMITER); listStatusCheckHelper(root); + // Cleanup + teardownVolumeBucketWithDir(bucketPath2); + teardownVolumeBucketWithDir(bucketPath1); } /** @@ -672,8 +747,9 @@ private FileStatus[] customListStatus(Path f, boolean recursive, @Test public void testListStatusRootAndVolumeContinuation() throws IOException { - for (int i = 0; i < 5; i++) { - createRandomVolumeBucketWithDirs(); + Path[] paths = new Path[5]; + for (int i = 0; i < paths.length; i++) { + paths[i] = createRandomVolumeBucketWithDirs(); } // Similar to recursive option, we can't test continuation directly with // FileSystem because we can't change LISTING_PAGE_SIZE. 
Use adapter instead @@ -682,7 +758,8 @@ public void testListStatusRootAndVolumeContinuation() throws IOException { FileStatus[] fileStatusesOver = customListStatus(new Path("/"), false, "", 8); // There are only 5 volumes - Assert.assertEquals(5, fileStatusesOver.length); + // Default volume "s3v" is created during startup. + Assert.assertEquals(5 + 1, fileStatusesOver.length); // numEntries = 5 FileStatus[] fileStatusesExact = customListStatus(new Path("/"), @@ -705,6 +782,11 @@ public void testListStatusRootAndVolumeContinuation() throws IOException { // excludes startVolume (startPath) from the result. Might change. Assert.assertEquals(fileStatusesOver.length, fileStatusesLimit1.length + fileStatusesLimit2.length); + + // Cleanup + for (Path path : paths) { + teardownVolumeBucketWithDir(path); + } } /* @@ -750,7 +832,8 @@ public void testTempMount() throws Exception { } // Write under /tmp/, OFS will create the temp bucket if not exist - fs.mkdirs(new Path("/tmp/dir1")); + Path dir1 = new Path("/tmp/dir1"); + fs.mkdirs(dir1); try (FSDataOutputStream stream = ofs.create(new Path("/tmp/dir1/file1"))) { stream.write(1); @@ -765,11 +848,15 @@ public void testTempMount() throws Exception { Assert.assertEquals( "/tmp/dir1", fileStatuses[0].getPath().toUri().getPath()); // Verify file1 creation - FileStatus[] fileStatusesInDir1 = - fs.listStatus(new Path("/tmp/dir1")); + FileStatus[] fileStatusesInDir1 = fs.listStatus(dir1); Assert.assertEquals(1, fileStatusesInDir1.length); Assert.assertEquals("/tmp/dir1/file1", fileStatusesInDir1[0].getPath().toUri().getPath()); + + // Cleanup + fs.delete(dir1, true); + vol.deleteBucket(hashedUsername); + proxy.deleteVolume(OFSPath.OFS_MOUNT_TMP_VOLUMENAME); } /** @@ -916,7 +1003,7 @@ public void testGetTrashRoots() throws IOException { Path trashRoot1 = new Path(bucketPath, TRASH_PREFIX); Path user1Trash1 = new Path(trashRoot1, username); - // When user trash dir isn't been created + // When user trash dir hasn't been created Assert.assertEquals(0, fs.getTrashRoots(false).size()); Assert.assertEquals(0, fs.getTrashRoots(true).size()); // Let's create our first user1 (current user) trash dir. @@ -995,8 +1082,78 @@ public void testGetTrashRoots() throws IOException { fs.delete(user1Trash1, true); Assert.assertEquals(0, fs.getTrashRoots(false).size()); Assert.assertEquals(0, fs.getTrashRoots(true).size()); + fs.delete(trashRoot1, true); // Restore owner Assert.assertTrue(volume1.setOwner(prevOwner)); } + /** + * Check that no files are actually moved to trash since it is disabled by + * fs.rename(src, dst, options). + */ + @Test + public void testRenameToTrashDisabled() throws IOException { + // Create a file + String testKeyName = "testKey1"; + Path path = new Path(bucketPath, testKeyName); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + + // Call moveToTrash. We can't call protected fs.rename() directly + trash.moveToTrash(path); + + // Construct paths + String username = UserGroupInformation.getCurrentUser().getShortUserName(); + Path trashRoot = new Path(bucketPath, TRASH_PREFIX); + Path userTrash = new Path(trashRoot, username); + Path userTrashCurrent = new Path(userTrash, "Current"); + Path trashPath = new Path(userTrashCurrent, testKeyName); + + // Trash Current directory should still have been created. 
+ Assert.assertTrue(ofs.exists(userTrashCurrent)); + // Check under trash, the key should be deleted instead + Assert.assertFalse(ofs.exists(trashPath)); + + // Cleanup + ofs.delete(trashRoot, true); + } + + @Test + public void testFileDelete() throws Exception { + Path grandparent = new Path(bucketPath, "testBatchDelete"); + Path parent = new Path(grandparent, "parent"); + Path childFolder = new Path(parent, "childFolder"); + // BatchSize is 5, so we create a number of keys that is not a + // multiple of 5, to verify that the final batch with fewer than + // batchSize keys is also deleted. + for (int i = 0; i < 8; i++) { + Path childFile = new Path(parent, "child" + i); + Path childFolderFile = new Path(childFolder, "child" + i); + ContractTestUtils.touch(fs, childFile); + ContractTestUtils.touch(fs, childFolderFile); + } + + assertTrue(fs.listStatus(grandparent).length == 1); + assertTrue(fs.listStatus(parent).length == 9); + assertTrue(fs.listStatus(childFolder).length == 8); + + boolean successResult = fs.delete(grandparent, true); + assertTrue(successResult); + assertTrue(!ofs.exists(grandparent)); + for (int i = 0; i < 8; i++) { + Path childFile = new Path(parent, "child" + i); + // Make sure all keys under testBatchDelete/parent are deleted + assertTrue(!ofs.exists(childFile)); + + // The recursive delete should also have removed all keys under + // testBatchDelete/parent/childFolder. + Path childFolderFile = new Path(childFolder, "child" + i); + assertTrue(!ofs.exists(childFolderFile)); + } + // Will get: WARN ozone.BasicOzoneFileSystem delete: Path does not exist. + // This will return false. + boolean falseResult = fs.delete(parent, true); + assertFalse(falseResult); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index 7335a933b467..104b10cfa999 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.time.Duration; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -29,7 +28,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; @@ -71,14 +70,13 @@ public static void createCluster() throws IOException { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "."
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + conf.addResource(CONTRACT_XML); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java index c90a7ba9ac04..9eb43a0f7937 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.time.Duration; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -29,7 +28,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; @@ -68,14 +67,13 @@ public static void createCluster() throws IOException { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." 
+ - watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + conf.addResource(CONTRACT_XML); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); @@ -101,12 +99,7 @@ public FileSystem getTestFileSystem() throws IOException { OzoneConsts.OZONE_OFS_URI_SCHEME, cluster.getOzoneManager().getRpcPort()); getConf().set("fs.defaultFS", uri); - - // Note: FileSystem#loadFileSystems doesn't load OFS class because - // META-INF points to org.apache.hadoop.fs.ozone.OzoneFileSystem - getConf().set("fs.ofs.impl", - "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"); - + // fs.ofs.impl should be loaded from META-INF, no need to explicitly set it copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY); copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); return FileSystem.get(getConf()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java index 2bb1fb18ad67..6f58eaea2ced 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java @@ -63,7 +63,7 @@ public static void init() throws Exception { DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(10)); - ratisServerConfig.setNoLeaderTimeout(Duration.ofSeconds(10)); + ratisServerConfig.setNoLeaderTimeout(Duration.ofMinutes(5)); conf.setFromObject(ratisServerConfig); conf.set(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, "2s"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index 2023e0e4cefa..dd543ed7841c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -23,7 +23,9 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -75,6 +77,20 @@ public static void closeContainers( }, omKeyLocationInfoGroups); } + /** + * Close all containers. + * + * @param eventPublisher event publisher. + * @param scm StorageContainerManager instance.
+ */ + public static void closeAllContainers(EventPublisher eventPublisher, + StorageContainerManager scm) { + for (ContainerID containerID : + scm.getContainerManager().getContainerIDs()) { + eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); + } + } + /** * Performs the provided consumer on containers which contain the blocks * listed in omKeyLocationInfoGroups. @@ -85,7 +103,7 @@ public static void closeContainers( */ public static void performOperationOnKeyContainers( CheckedConsumer consumer, - List omKeyLocationInfoGroups) throws Exception { + List omKeyLocationInfoGroups) throws Exception{ for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyLocationInfoGroups) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index 1320b5b9cd89..2b492a2c9b7c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -26,7 +26,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.junit.Assert.fail; -import org.junit.Ignore; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; @@ -109,7 +108,6 @@ /** * Test class that exercises the StorageContainerManager. */ -@Ignore public class TestStorageContainerManager { private static XceiverClientManager xceiverClientManager; private static final Logger LOG = LoggerFactory.getLogger( @@ -119,7 +117,7 @@ public class TestStorageContainerManager { * Set the timeout for every test. */ @Rule - public Timeout testTimeout = new Timeout(300000); + public Timeout testTimeout = new Timeout(900000); @Rule public ExpectedException thrown = ExpectedException.none(); @@ -525,7 +523,7 @@ public void testScmInfo() throws Exception { /** * Test datanode heartbeat well processed with a 4-layer network topology. */ - @Test(timeout = 60000) + @Test(timeout = 180000) public void testScmProcessDatanodeHeartbeat() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String scmId = UUID.randomUUID().toString(); @@ -593,7 +591,10 @@ public void testCloseContainerCommandOnRestart() throws Exception { new TestStorageContainerManagerHelper(cluster, conf); helper.createKeys(10, 4096); - Thread.sleep(5000); + GenericTestUtils.waitFor(() -> { + return cluster.getStorageContainerManager().getContainerManager().
+ getContainers() != null; + }, 1000, 10000); StorageContainerManager scm = cluster.getStorageContainerManager(); List containers = cluster.getStorageContainerManager() @@ -604,8 +605,18 @@ public void testCloseContainerCommandOnRestart() throws Exception { // Stop processing HB scm.getDatanodeProtocolServer().stop(); - scm.getContainerManager().updateContainerState(selectedContainer - .containerID(), HddsProtos.LifeCycleEvent.FINALIZE); + LOG.info( + "Current Container State is {}", selectedContainer.getState()); + try { + scm.getContainerManager().updateContainerState(selectedContainer + .containerID(), HddsProtos.LifeCycleEvent.FINALIZE); + } catch (SCMException ex) { + if (selectedContainer.getState() != HddsProtos.LifeCycleState.CLOSING) { + ex.printStackTrace(); + throw(ex); + } + } + cluster.restartStorageContainerManager(false); scm = cluster.getStorageContainerManager(); EventPublisher publisher = mock(EventPublisher.class); @@ -616,7 +627,6 @@ public void testCloseContainerCommandOnRestart() throws Exception { modifiersField.setAccessible(true); modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL); f.set(replicationManager, publisher); - Thread.sleep(10000); UUID dnUuid = cluster.getHddsDatanodes().iterator().next() .getDatanodeDetails().getUuid(); @@ -628,6 +638,13 @@ public void testCloseContainerCommandOnRestart() throws Exception { CommandForDatanode commandForDatanode = new CommandForDatanode( dnUuid, closeContainerCommand); + GenericTestUtils.waitFor(() -> { + return replicationManager.isRunning(); + }, 1000, 25000); + + // Give ReplicationManager some time to process the containers. + Thread.sleep(5000); + verify(publisher).fireEvent(eq(SCMEvents.DATANODE_COMMAND), argThat(new CloseContainerCommandMatcher(dnUuid, commandForDatanode))); } finally { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java index 087376b2d89e..2b1502f75962 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.XceiverClientSpi; @@ -97,14 +97,12 @@ private void startCluster(OzoneConfiguration conf) throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 40756b0f4d13..12fb4223d563 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -108,19 +108,21 @@ public void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + conf.setBoolean(OZONE_CLIENT_STREAM_BUFFER_FLUSH_DELAY, false); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); + conf.setFromObject(ratisClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7) .setTotalPipelineNumLimit(10).setBlockSize(blockSize) .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java index 6fa54ed8faac..98de63eef231 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -102,18 +102,20 @@ public void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); + conf.setFromObject(ratisClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7) .setTotalPipelineNumLimit(10).setBlockSize(blockSize) .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java index b9b7e5ed1a6b..0a2ff14706e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.XceiverClientReply; @@ -46,6 +46,7 @@ import org.apache.ratis.protocol.AlreadyClosedException; import org.apache.ratis.protocol.NotReplicatedException; import org.apache.ratis.protocol.RaftRetryFailureException; +import org.apache.ratis.protocol.TimeoutIOException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -112,18 +113,19 @@ public void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(10)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(ratisClientConfig); + conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE"); conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, @@ -311,14 +313,17 @@ public void testReleaseBuffersOnException() throws Exception { watcher.watchForCommit(replies.get(1).getLogIndex() + 100); Assert.fail("Expected exception not thrown"); } catch(IOException ioe) { - // with retry count set to lower limit and a lower watch request + // with retry count set to noRetry and a lower watch request // timeout, watch request will eventually - // fail with RaftRetryFailure exception from ratis client or the client + // fail with TimeoutIOException from ratis client or the client // can itself get AlreadyClosedException from the Ratis Server + // and the write may fail with RaftRetryFailureException Throwable t = HddsClientUtils.checkForException(ioe); - Assert.assertTrue(t instanceof RaftRetryFailureException || - t instanceof AlreadyClosedException || - t instanceof NotReplicatedException); + Assert.assertTrue("Unexpected exception: " + t.getClass(), + t instanceof RaftRetryFailureException || + t instanceof TimeoutIOException || + t instanceof AlreadyClosedException || + t instanceof NotReplicatedException); } if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1) .getLogIndex()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index 2c7f81896e79..d9f75788ec8b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -45,8 +45,8 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; +import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; @@ -65,7 +65,6 @@ * Tests delete key operation with a slow follower in the datanode * pipeline. 
*/ -@Ignore public class TestContainerReplicationEndToEnd { private static MiniOzoneCluster cluster; @@ -94,10 +93,10 @@ public static void init() throws Exception { conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, containerReportInterval, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, containerReportInterval, - TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, + 5 * containerReportInterval, TimeUnit.MILLISECONDS); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, - 2 * containerReportInterval, TimeUnit.MILLISECONDS); + 10 * containerReportInterval, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, TimeUnit.SECONDS); DatanodeRatisServerConfig ratisServerConfig = @@ -167,9 +166,13 @@ public void testContainerReplication() throws Exception { .getPipeline(pipelineID); key.close(); - if (cluster.getStorageContainerManager().getContainerManager() - .getContainer(new ContainerID(containerID)).getState() != - HddsProtos.LifeCycleState.CLOSING) { + HddsProtos.LifeCycleState containerState = + cluster.getStorageContainerManager().getContainerManager() + .getContainer(new ContainerID(containerID)).getState(); + LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info( + "Current Container State is {}", containerState); + if ((containerState != HddsProtos.LifeCycleState.CLOSING) && + (containerState != HddsProtos.LifeCycleState.CLOSED)) { cluster.getStorageContainerManager().getContainerManager() .updateContainerState(new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index 7af57debd32e..e00d5d07bc48 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; @@ -97,18 +97,19 @@ public void setup() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); + conf.setFromObject(ratisClientConfig); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index fba3ba1289d1..a878fb164298 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -121,23 +121,24 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 10, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 10, TimeUnit.SECONDS); + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(10)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(ratisClientConfig); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java index cac16f12cc8d..de377bad3be5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; @@ -120,20 +120,22 @@ public static void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); + conf.setFromObject(ratisClientConfig); + conf.setQuietMode(false); int numOfDatanodes = 3; cluster = MiniOzoneCluster.newBuilder(conf) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index e35a39304829..a9c0706e04ac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -97,10 +97,13 @@ private void init() throws Exception { chunkSize = (int) OzoneConsts.MB; blockSize = 4 * chunkSize; conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); + conf.setFromObject(ratisClientConfig); + conf.setTimeDuration( OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); @@ -112,14 +115,13 @@ private void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + conf.setBoolean( OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_DELAY, false); conf.setQuietMode(false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index 8f145b48ebfd..76027f7e295a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -95,10 +95,13 @@ private void init() throws Exception { maxFlushSize = 2 * flushSize; blockSize = 4 * chunkSize; conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); + conf.setFromObject(ratisClientConfig); + conf.setTimeDuration( OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); @@ -110,14 +113,13 @@ private void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + conf.setQuietMode(false); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java index 8ab176d6a35e..7775bb7def3c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java @@ -43,7 +43,6 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Arrays; import java.util.Collection; @@ -294,66 +293,6 @@ public void testSeek() throws Exception { } } - @Test - public void testCopyLarge() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = TestHelper.createKey(keyName, - ReplicationType.RATIS, 0, objectStore, volumeName, bucketName); - - // write data spanning 3 blocks - int dataLength = (2 * blockSize) + (blockSize / 2); - - byte[] inputData = new byte[dataLength]; - Random rand = new Random(); - for (int i = 0; i < dataLength; i++) { - inputData[i] = (byte) rand.nextInt(127); - } - key.write(inputData); - key.close(); - - // test with random start and random length - for (int i = 0; i < 100; i++) { - int inputOffset = rand.nextInt(dataLength - 1); - int length = rand.nextInt(dataLength - inputOffset); - - KeyInputStream keyInputStream = (KeyInputStream) objectStore - .getVolume(volumeName).getBucket(bucketName).readKey(keyName) - .getInputStream(); - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - - keyInputStream.copyLarge(outputStream, inputOffset, length, - new byte[4096]); - byte[] readData = outputStream.toByteArray(); - keyInputStream.close(); - outputStream.close(); - - for (int j = inputOffset; j < inputOffset + length; j++) { - Assert.assertEquals(readData[j - inputOffset], inputData[j]); - } - } - - // test with random start and -ve length - for (int i = 0; i < 10; i++) { - int inputOffset = rand.nextInt(dataLength - 1); - int length = -1; - - KeyInputStream keyInputStream = (KeyInputStream) objectStore - .getVolume(volumeName).getBucket(bucketName).readKey(keyName) - .getInputStream(); - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - - keyInputStream.copyLarge(outputStream, inputOffset, length, - new byte[4096]); - byte[] readData = outputStream.toByteArray(); - keyInputStream.close(); - outputStream.close(); - - for (int j = inputOffset; j < dataLength; j++) { - Assert.assertEquals(readData[j - inputOffset], inputData[j]); - } - } - } - @Test public void testReadChunk() throws Exception { String keyName = getKeyName(); @@ -395,4 +334,62 @@ public void testReadChunk() throws Exception { } keyInputStream.close(); } + + @Test + public void testSkip() throws Exception { + XceiverClientManager.resetXceiverClientMetrics(); + XceiverClientMetrics metrics = XceiverClientManager + .getXceiverClientMetrics(); + long writeChunkCount = 
metrics.getContainerOpCountMetrics( + ContainerProtos.Type.WriteChunk); + long readChunkCount = metrics.getContainerOpCountMetrics( + ContainerProtos.Type.ReadChunk); + + String keyName = getKeyName(); + OzoneOutputStream key = TestHelper.createKey(keyName, + ReplicationType.RATIS, 0, objectStore, volumeName, bucketName); + + // write data spanning 3 chunks + int dataLength = (2 * chunkSize) + (chunkSize / 2); + byte[] inputData = ContainerTestHelper.getFixedLengthString( + keyString, dataLength).getBytes(UTF_8); + key.write(inputData); + key.close(); + + Assert.assertEquals(writeChunkCount + 3, + metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); + + KeyInputStream keyInputStream = (KeyInputStream) objectStore + .getVolume(volumeName).getBucket(bucketName).readKey(keyName) + .getInputStream(); + + // skip a total of 150 bytes + keyInputStream.skip(70); + Assert.assertEquals(70, keyInputStream.getPos()); + keyInputStream.skip(0); + Assert.assertEquals(70, keyInputStream.getPos()); + keyInputStream.skip(80); + + Assert.assertEquals(150, keyInputStream.getPos()); + + // Skip operation should not result in any readChunk operation. + Assert.assertEquals(readChunkCount, metrics + .getContainerOpCountMetrics(ContainerProtos.Type.ReadChunk)); + + byte[] readData = new byte[chunkSize]; + keyInputStream.read(readData, 0, chunkSize); + + // Since we are reading data from index 150 to 250 and the chunk size is + // 100 bytes, we need to read 2 chunks. + Assert.assertEquals(readChunkCount + 2, + metrics.getContainerOpCountMetrics(ContainerProtos.Type.ReadChunk)); + + keyInputStream.close(); + + // Verify that the data read matches with the input data at corresponding + // indices. + for (int i = 0; i < chunkSize; i++) { + Assert.assertEquals(inputData[chunkSize + 50 + i], readData[i]); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index 1ca073d868cd..b435ce98057f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -94,18 +94,20 @@ private void startCluster(int datanodes) throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "."
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 30, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); + conf.setFromObject(ratisClientConfig); + conf.setTimeDuration( OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 591f5c68b911..45d07b097a38 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -25,6 +25,7 @@ import java.util.BitSet; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -87,6 +88,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; @@ -107,6 +109,9 @@ import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; @@ -118,7 +123,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import org.junit.Ignore; + import org.junit.Test; /** @@ -163,8 +168,6 @@ static void startCluster(OzoneConfiguration conf) throws Exception { cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); store = ozClient.getObjectStore(); - String volumeName = HddsClientUtils.getS3VolumeName(conf); - store.createVolume(volumeName); storageContainerLocationClient = cluster.getStorageContainerLocationClient(); ozoneManager = 
cluster.getOzoneManager(); @@ -237,6 +240,22 @@ public void testOMClientProxyProvider() { ozoneManager.getOmRpcServerAddr())); } + @Test + public void testDefaultS3GVolumeExists() throws Exception { + String s3VolumeName = HddsClientUtils.getS3VolumeName(cluster.getConf()); + OzoneVolume ozoneVolume = store.getVolume(s3VolumeName); + Assert.assertEquals(ozoneVolume.getName(), s3VolumeName); + OMMetadataManager omMetadataManager = + cluster.getOzoneManager().getMetadataManager(); + long transactionID = Long.MAX_VALUE -1 >> 8; + long objectID = transactionID << 8; + OmVolumeArgs omVolumeArgs = + cluster.getOzoneManager().getMetadataManager().getVolumeTable().get( + omMetadataManager.getVolumeKey(s3VolumeName)); + Assert.assertEquals(objectID, omVolumeArgs.getObjectID()); + Assert.assertEquals(transactionID, omVolumeArgs.getUpdateID()); + } + @Test public void testVolumeSetOwner() throws IOException { String volumeName = UUID.randomUUID().toString(); @@ -1186,7 +1205,7 @@ public void testDeleteKey() Assert.assertEquals(keyName, key.getName()); bucket.deleteKey(keyName); - OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND, + OzoneTestUtils.expectOmException(KEY_NOT_FOUND, () -> bucket.getKey(keyName)); } @@ -1201,13 +1220,7 @@ public void testRenameKey() OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - OzoneOutputStream out = bucket.createKey(fromKeyName, - value.getBytes().length, STAND_ALONE, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(fromKeyName); - Assert.assertEquals(fromKeyName, key.getName()); + createTestKey(bucket, fromKeyName, value); // Rename to empty string should fail. OMException oe = null; @@ -1228,15 +1241,79 @@ public void testRenameKey() } catch (OMException e) { oe = e; } - Assert.assertEquals(ResultCodes.KEY_NOT_FOUND, oe.getResult()); + Assert.assertEquals(KEY_NOT_FOUND, oe.getResult()); - key = bucket.getKey(toKeyName); + OzoneKey key = bucket.getKey(toKeyName); Assert.assertEquals(toKeyName, key.getName()); } - // Listing all volumes in the cluster feature has to be fixed after HDDS-357. 
- // TODO: fix this - @Ignore + @Test + public void testKeysRename() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName1 = "dir/file1"; + String keyName2 = "dir/file2"; + + String newKeyName1 = "dir/key1"; + String newKeyName2 = "dir/key2"; + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + createTestKey(bucket, keyName1, value); + createTestKey(bucket, keyName2, value); + + Map keyMap = new HashMap(); + keyMap.put(keyName1, newKeyName1); + keyMap.put(keyName2, newKeyName2); + bucket.renameKeys(keyMap); + + // new key should exist + Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName()); + Assert.assertEquals(newKeyName2, bucket.getKey(newKeyName2).getName()); + + // old key should not exist + assertKeyRenamedEx(bucket, keyName1); + assertKeyRenamedEx(bucket, keyName2); + } + + @Test + public void testKeysRenameFail() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName1 = "dir/file1"; + String keyName2 = "dir/file2"; + + String newKeyName1 = "dir/key1"; + String newKeyName2 = "dir/key2"; + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + // Create only keyName1 to test the partial failure of renameKeys. + createTestKey(bucket, keyName1, value); + + Map keyMap = new HashMap(); + keyMap.put(keyName1, newKeyName1); + keyMap.put(keyName2, newKeyName2); + + try { + bucket.renameKeys(keyMap); + } catch (OMException ex) { + Assert.assertEquals(PARTIAL_RENAME, ex.getResult()); + } + + // newKeyName1 should exist + Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName()); + // newKeyName2 should not exist + assertKeyRenamedEx(bucket, keyName2); + } + @Test public void testListVolume() throws IOException { String volBase = "vol-" + RandomStringUtils.randomNumeric(3); @@ -1741,7 +1818,7 @@ public void testNoSuchUploadError() throws Exception { String uploadID = "random"; OzoneTestUtils - .expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> + .expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> bucket .createMultipartKey(keyName, sampleData.length(), 1, uploadID)); } @@ -1900,10 +1977,107 @@ public void testAbortUploadFail() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - OzoneTestUtils.expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, + OzoneTestUtils.expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> bucket.abortMultipartUpload(keyName, "random")); } + @Test + public void testAbortUploadFailWithInProgressPartUpload() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, + STAND_ALONE, ONE); + + Assert.assertNotNull(omMultipartInfo.getUploadID()); + + // Do not close output stream. 
+ byte[] data = "data".getBytes(UTF_8); + OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, + data.length, 1, omMultipartInfo.getUploadID()); + ozoneOutputStream.write(data, 0, data.length); + + // Abort before completing part upload. + bucket.abortMultipartUpload(keyName, omMultipartInfo.getUploadID()); + + try { + ozoneOutputStream.close(); + fail("testAbortUploadFailWithInProgressPartUpload failed"); + } catch (IOException ex) { + assertTrue(ex instanceof OMException); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, + ((OMException) ex).getResult()); + } + } + + @Test + public void testCommitPartAfterCompleteUpload() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, + STAND_ALONE, ONE); + + Assert.assertNotNull(omMultipartInfo.getUploadID()); + + String uploadID = omMultipartInfo.getUploadID(); + + // upload part 1. + byte[] data = generateData(5 * 1024 * 1024, + (byte) RandomUtils.nextLong()); + OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, + data.length, 1, uploadID); + ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.close(); + + OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = + ozoneOutputStream.getCommitUploadPartInfo(); + + // Do not close output stream for part 2. + ozoneOutputStream = bucket.createMultipartKey(keyName, + data.length, 2, omMultipartInfo.getUploadID()); + ozoneOutputStream.write(data, 0, data.length); + + Map partsMap = new LinkedHashMap<>(); + partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = + bucket.completeMultipartUpload(keyName, + uploadID, partsMap); + + Assert.assertNotNull(omMultipartCommitUploadPartInfo); + + byte[] fileContent = new byte[data.length]; + OzoneInputStream inputStream = bucket.readKey(keyName); + inputStream.read(fileContent); + StringBuilder sb = new StringBuilder(data.length); + + // Combine all parts data, and check is it matching with get key data. 
+ String part1 = new String(data); + sb.append(part1); + Assert.assertEquals(sb.toString(), new String(fileContent)); + + try { + ozoneOutputStream.close(); + fail("testCommitPartAfterCompleteUpload failed"); + } catch (IOException ex) { + assertTrue(ex instanceof OMException); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, + ((OMException) ex).getResult()); + } + } + @Test public void testAbortUploadSuccessWithOutAnyParts() throws Exception { @@ -2131,7 +2305,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() @Test public void testListPartsWithInvalidUploadID() throws Exception { OzoneTestUtils - .expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> { + .expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); @@ -2575,6 +2749,28 @@ private void completeMultipartUpload(OzoneBucket bucket, String keyName, Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash()); } + private void createTestKey(OzoneBucket bucket, String keyName, + String keyValue) throws IOException { + OzoneOutputStream out = bucket.createKey(keyName, + keyValue.getBytes().length, STAND_ALONE, + ONE, new HashMap<>()); + out.write(keyValue.getBytes()); + out.close(); + OzoneKey key = bucket.getKey(keyName); + Assert.assertEquals(keyName, key.getName()); + } + + private void assertKeyRenamedEx(OzoneBucket bucket, String keyName) + throws Exception { + OMException oe = null; + try { + bucket.getKey(keyName); + } catch (OMException e) { + oe = e; + } + Assert.assertEquals(KEY_NOT_FOUND, oe.getResult()); + } + /** * Tests GDPR encryption/decryption. * 1. Create GDPR Enabled bucket. @@ -2673,7 +2869,6 @@ public void testKeyReadWriteForGDPR() throws Exception { * GDPR encryption details (flag, secret, algorithm). 
* @throws Exception */ - @Ignore @Test public void testDeletedKeyForGDPR() throws Exception { //Step 1 diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index 60a1a1e8fdb4..72ce91af6758 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.security.token.BlockTokenVerifier; @@ -126,8 +125,6 @@ public static void init() throws Exception { cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); store = ozClient.getObjectStore(); - String volumeName = HddsClientUtils.getS3VolumeName(conf); - store.createVolume(volumeName); storageContainerLocationClient = cluster.getStorageContainerLocationClient(); ozoneManager = cluster.getOzoneManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index 63e63af6e1a0..1dd2bd528039 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -106,24 +106,24 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 10, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 10, TimeUnit.SECONDS); + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(10)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(ratisClientConfig); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." 
+ - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." + - "watch.request.timeout", - 10, TimeUnit.SECONDS); - conf.setQuietMode(false); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2). setHbInterval(200) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index 131d2890b8b6..c918b9b80da9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -21,8 +21,9 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.*; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -100,23 +101,25 @@ public void init() throws Exception { conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 10, TimeUnit.SECONDS); conf.setQuietMode(false); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.write.timeout", 10, TimeUnit.SECONDS); - conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY - + ".client.request.watch.timeout", 10, TimeUnit.SECONDS); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(10)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(ratisClientConfig); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(9) @@ -295,9 +298,11 @@ public void test2WayCommitForTimeoutException() throws Exception { xceiverClient.getPipeline())); reply.getResponse().get(); Assert.assertEquals(3, ratisClient.getCommitInfoMap().size()); + List nodesInPipeline = pipeline.getNodes(); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { // shutdown the ratis follower - if (ContainerTestHelper.isRatisFollower(dn, pipeline)) { + if (nodesInPipeline.contains(dn.getDatanodeDetails()) + && ContainerTestHelper.isRatisFollower(dn, pipeline)) { cluster.shutdownHddsDatanode(dn.getDatanodeDetails()); break; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 97a27c1be6eb..aeb5bc7fbd5f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -29,8 +29,10 @@ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; @@ -112,6 +114,10 @@ public static void init() throws Exception { conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, + false); + conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); + conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) @@ -180,10 +186,8 @@ public void testBlockDeletion() throws Exception { } // close the containers which hold the blocks for the key - OzoneTestUtils.closeContainers(omKeyLocationInfoGroupList, scm); - - waitForDatanodeCommandRetry(); - + OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm); + Thread.sleep(2000); // make sure the containers are closed on the dn omKeyLocationInfoGroupList.forEach((group) -> { List locationInfo = group.getLocationList(); @@ -193,6 +197,9 @@ public void testBlockDeletion() throws Exception { .getContainer(info.getContainerID()).getContainerData() .setState(ContainerProtos.ContainerDataProto.State.CLOSED)); }); + + waitForDatanodeCommandRetry(); + waitForDatanodeBlockDeletionStart(); // The blocks should be deleted in the DN. 
verifyBlocksDeleted(omKeyLocationInfoGroupList); @@ -214,6 +221,64 @@ public void testBlockDeletion() throws Exception { verifyTransactionsCommitted(); } + @Test + public void testContainerStatistics() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = RandomStringUtils.random(1000000); + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, + ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); + out.write(value.getBytes()); + out.close(); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) + .setBucketName(bucketName).setKeyName(keyName).setDataSize(0) + .setType(HddsProtos.ReplicationType.RATIS) + .setFactor(HddsProtos.ReplicationFactor.THREE) + .setRefreshPipeline(true) + .build(); + List omKeyLocationInfoGroupList = + om.lookupKey(keyArgs).getKeyLocationVersions(); + Thread.sleep(5000); + List containerInfos = + scm.getContainerManager().getContainers(); + final int valueSize = value.getBytes().length; + final int keyCount = 1; + containerInfos.stream().forEach(container -> { + Assert.assertEquals(valueSize, container.getUsedBytes()); + Assert.assertEquals(keyCount, container.getNumberOfKeys()); + }); + + OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm); + // Wait for container to close + Thread.sleep(2000); + // make sure the containers are closed on the dn + omKeyLocationInfoGroupList.forEach((group) -> { + List locationInfo = group.getLocationList(); + locationInfo.forEach( + (info) -> cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() + .getContainer().getContainerSet() + .getContainer(info.getContainerID()).getContainerData() + .setState(ContainerProtos.ContainerDataProto.State.CLOSED)); + }); + + om.deleteKey(keyArgs); + // Wait for blocks to be deleted and container reports to be processed + Thread.sleep(5000); + containerInfos = scm.getContainerManager().getContainers(); + containerInfos.stream().forEach(container -> { + Assert.assertEquals(0, container.getUsedBytes()); + Assert.assertEquals(0, container.getNumberOfKeys()); + }); + } + private void waitForDatanodeBlockDeletionStart() throws TimeoutException, InterruptedException { LogCapturer logCapturer = @@ -275,7 +340,7 @@ private void verifyPendingDeleteEvent() cluster.getHddsDatanodes().get(0) .getDatanodeStateMachine().triggerHeartbeat(); // wait for event to be handled by event handler - Thread.sleep(1000); + Thread.sleep(2000); String output = logCapturer.getOutput(); for (ContainerReplicaProto containerInfo : dummyReport.getReportsList()) { long containerId = containerInfo.getContainerID(); @@ -283,9 +348,6 @@ private void verifyPendingDeleteEvent() if (containerIdsWithDeletedBlocks.contains(containerId)) { Assert.assertTrue(output.contains( "for containerID " + containerId + ". Datanode delete txnID")); - } else { - Assert.assertTrue(!output.contains( - "for containerID " + containerId + ". 
Datanode delete txnID")); } } logCapturer.clearOutput(); @@ -304,9 +366,6 @@ private void matchContainerTransactionIds() throws IOException { scm.getContainerInfo(containerId).getDeleteTransactionId() > 0); maxTransactionId = max(maxTransactionId, scm.getContainerInfo(containerId).getDeleteTransactionId()); - } else { - Assert.assertEquals( - scm.getContainerInfo(containerId).getDeleteTransactionId(), 0); } Assert.assertEquals(((KeyValueContainerData)dnContainerSet .getContainer(containerId).getContainerData()) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index 154bd559a485..d14238417f36 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -23,13 +23,12 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.Assert; import org.junit.Test; import java.time.Duration; -import java.util.concurrent.TimeUnit; /** * Tests Freon, with MiniOzoneCluster and validate data. @@ -44,14 +43,13 @@ static void startCluster(OzoneConfiguration conf) throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 10, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5).setTotalPipelineNumLimit(8).build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java index 8984b66d0cde..7a28ba59a6f2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.AfterClass; @@ -74,14 +74,13 @@ public static void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf) .setHbProcessorInterval(1000) .setHbInterval(1000) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java index 61ac6af738e6..8e8109cce3a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -67,14 +67,13 @@ public static void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf) .setHbProcessorInterval(1000) .setHbInterval(1000) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java new file mode 100644 index 000000000000..69731ea666b9 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java @@ -0,0 +1,203 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.freon; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.ratis.server.impl.RaftServerImpl; +import org.apache.ratis.server.raftlog.RaftLog; +import java.util.LinkedList; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.event.Level; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.URI; + +/** + * Test for HadoopNestedDirGenerator. + */ + +public class TestHadoopNestedDirGenerator { + + private String path; + private OzoneConfiguration conf = null; + private MiniOzoneCluster cluster = null; + private ObjectStore store = null; + private static final Logger LOG = + LoggerFactory.getLogger(TestHadoopNestedDirGenerator.class); + @Before + public void setup() { + path = GenericTestUtils + .getTempPath(TestOzoneClientKeyGenerator.class.getSimpleName()); + GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(RaftServerImpl.LOG, Level.DEBUG); + File baseDir = new File(path); + baseDir.mkdirs(); + } + + /** + * Shutdown MiniDFSCluster. + */ + + private void shutdown() throws IOException { + if (cluster != null) { + cluster.shutdown(); + FileUtils.deleteDirectory(new File(path)); + } + } + + /** + * Create a MiniDFSCluster for testing. + * + * @throws IOException + */ + + private void startCluster() throws Exception { + conf = new OzoneConfiguration(); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); + cluster.waitForClusterToBeReady(); + cluster.waitTobeOutOfSafeMode(); + store = OzoneClientFactory.getRpcClient(conf).getObjectStore(); + } + + @Test + public void testNestedDirTreeGeneration() throws Exception { + try { + startCluster(); + FileOutputStream out = FileUtils.openOutputStream(new File(path, + "conf")); + cluster.getConf().writeXml(out); + out.getFD().sync(); + out.close(); + verifyDirTree("vol1", + "bucket1", 1, 1); + verifyDirTree("vol2", + "bucket1", 1, 5); + verifyDirTree("vol3", + "bucket1", 2, 0); + verifyDirTree("vol4", + "bucket1", 3, 2); + verifyDirTree("vol5", + "bucket1", 5, 4); + } finally { + shutdown(); + } + } + + private void verifyDirTree(String volumeName, String bucketName, + int actualDepth, int span) + throws IOException { + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + String rootPath = "o3fs://" + bucketName + "." 
+ volumeName; + String confPath = new File(path, "conf").getAbsolutePath(); + new Freon().execute(new String[]{"-conf", confPath, "ddsg", "-d", + actualDepth + "", "-s", span + "", "-n", "1", "-r", rootPath}); + // verify the directory structure + FileSystem fileSystem = FileSystem.get(URI.create(rootPath), + conf); + Path rootDir = new Path(rootPath.concat("/")); + // verify root path details + FileStatus[] fileStatuses = fileSystem.listStatus(rootDir); + Path p = null; + for (FileStatus fileStatus : fileStatuses) { + // verify the num of peer directories and span directories + p = depthBFS(fileSystem, fileStatuses, span, actualDepth); + int actualSpan = spanCheck(fileSystem, span, p); + Assert.assertEquals("Mismatch span in a path", + span, actualSpan); + } + } + + /** + * Using BFS(Breadth First Search) to find the depth of nested + * directories. First we push the directory at level 1 to + * queue and follow BFS, as we encounter the child directories + * we put them in an array and increment the depth variable by 1. + */ + + private Path depthBFS(FileSystem fs, FileStatus[] fileStatuses, + int span, int actualDepth) throws IOException { + int depth = 0; + Path p = null; + if(span > 0){ + depth = 0; + } else if(span == 0){ + depth = 1; + } else{ + LOG.info("Span value can never be negative"); + } + LinkedList queue = new LinkedList(); + FileStatus f1 = fileStatuses[0]; + queue.add(f1); + while(queue.size() != 0){ + FileStatus f = queue.poll(); + FileStatus[] temp = fs.listStatus(f.getPath()); + if(temp.length > 0){ + ++depth; + for(int i = 0; i < temp.length; i++){ + queue.add(temp[i]); + } + } + if(span == 0){ + p = f.getPath(); + } else{ + p = f.getPath().getParent(); + } + } + Assert.assertEquals("Mismatch depth in a path", + depth, actualDepth); + return p; + } + + /** + * We get the path of last parent directory or leaf parent directory + * from depthBFS function above and perform 'ls' on that path + * and count the span directories. 
+ */ + + private int spanCheck(FileSystem fs, int span, Path p) throws IOException{ + int sp = 0; + int depth = 0; + if(span >= 0){ + depth = 0; + } else{ + LOG.info("Span value can never be negative"); + } + FileStatus[] fileStatuses = fs.listStatus(p); + for (FileStatus fileStatus : fileStatuses){ + if(fileStatus.isDirectory()){ + ++sp; + } + } + return sp; + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java index ddb27cead77e..beda4220fd3a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java @@ -19,13 +19,12 @@ package org.apache.hadoop.ozone.freon; import java.time.Duration; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.AfterClass; @@ -57,14 +56,13 @@ public static void init() throws Exception { ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); conf.setFromObject(ratisServerConfig); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + - "rpc.request.timeout", - 3, TimeUnit.SECONDS); - conf.setTimeDuration( - RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." 
+ - "watch.request.timeout", - 3, TimeUnit.SECONDS); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); + conf.setFromObject(raftClientConfig); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index 8984e76ea92b..7f049a3f6585 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -22,7 +22,9 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.*; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -32,6 +34,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -42,6 +45,7 @@ import java.io.IOException; import java.util.HashMap; +import java.util.Set; /** * This class tests container report with DN container state info. 
@@ -122,6 +126,12 @@ public void testContainerReportKeyWrite() throws Exception { ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID()); + Set replicas = + scm.getContainerManager().getContainerReplicas( + new ContainerID(keyInfo.getContainerID())); + Assert.assertTrue(replicas.size() == 1); + replicas.stream().forEach(rp -> + Assert.assertTrue(rp.getDatanodeDetails().getParent() != null)); LOG.info("SCM Container Info keyCount: {} usedBytes: {}", cinfo.getNumberOfKeys(), cinfo.getUsedBytes()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 7a1cb5b278dd..49d3417e9ed4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -23,6 +23,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Arrays; import java.util.BitSet; import java.util.Collections; import java.util.HashMap; @@ -137,6 +138,7 @@ public class TestKeyManagerImpl { private static NodeManager nodeManager; private static StorageContainerManager scm; private static ScmBlockLocationProtocol mockScmBlockLocationProtocol; + private static StorageContainerLocationProtocol mockScmContainerClient; private static OzoneConfiguration conf; private static OMMetadataManager metadataManager; private static File dir; @@ -178,9 +180,11 @@ public static void setUp() throws Exception { StorageUnit.BYTES); conf.setLong(OZONE_KEY_PREALLOCATION_BLOCKS_MAX, 10); + mockScmContainerClient = + Mockito.mock(StorageContainerLocationProtocol.class); keyManager = - new KeyManagerImpl(scm.getBlockProtocolServer(), metadataManager, conf, - "om1", null); + new KeyManagerImpl(scm.getBlockProtocolServer(), + mockScmContainerClient, metadataManager, conf, "om1", null); prefixManager = new PrefixManagerImpl(metadataManager, false); Mockito.when(mockScmBlockLocationProtocol @@ -206,7 +210,8 @@ public static void cleanup() throws Exception { @After public void cleanupTest() throws IOException { List fileStatuses = keyManager - .listStatus(createBuilder().setKeyName("").build(), true, "", 100000); + .listStatus(createBuilder().setKeyName("").build(), true, "", 100000, + null); for (OzoneFileStatus fileStatus : fileStatuses) { if (fileStatus.isFile()) { keyManager.deleteKey( @@ -314,7 +319,7 @@ public void testCreateDirectory() throws IOException { Path path = Paths.get(keyName); while (path != null) { // verify parent directories are created - Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory()); + Assert.assertTrue(keyManager.getFileStatus(keyArgs, null).isDirectory()); path = path.getParent(); } @@ -344,7 +349,7 @@ public void testCreateDirectory() throws IOException { .setKeyName(keyName) .build(); keyManager.createDirectory(keyArgs); - Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory()); + Assert.assertTrue(keyManager.getFileStatus(keyArgs, null).isDirectory()); // create directory where parent is root keyName = RandomStringUtils.randomAlphabetic(5); @@ -352,7 +357,7 @@ public void testCreateDirectory() throws IOException { .setKeyName(keyName) .build(); keyManager.createDirectory(keyArgs); - OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs); + OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs, null); 
Assert.assertTrue(fileStatus.isDirectory()); Assert.assertTrue(fileStatus.getKeyInfo().getKeyLocationVersions().get(0) .getLocationList().isEmpty()); @@ -407,7 +412,7 @@ public void testOpenFile() throws IOException { keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); keyManager.commitKey(keyArgs, keySession.getId()); Assert.assertTrue(keyManager - .getFileStatus(keyArgs).isFile()); + .getFileStatus(keyArgs, null).isFile()); // try creating a file over a directory keyArgs = createBuilder() @@ -763,6 +768,12 @@ public void testLookupKeyWithLocation() throws IOException { keyArgs.setLocationInfoList(locationInfoList); keyManager.commitKey(keyArgs, keySession.getId()); + ContainerInfo containerInfo = new ContainerInfo.Builder().setContainerID(1L) + .setPipelineID(pipeline.getId()).build(); + List containerWithPipelines = Arrays.asList( + new ContainerWithPipeline(containerInfo, pipeline)); + when(mockScmContainerClient.getContainerWithPipelineBatch( + Arrays.asList(1L))).thenReturn(containerWithPipelines); OmKeyInfo key = keyManager.lookupKey(keyArgs, null); Assert.assertEquals(key.getKeyName(), keyName); @@ -823,17 +834,17 @@ public void testListStatusWithTableCache() throws Exception { OmKeyArgs rootDirArgs = createKeyArgs(""); // Get entries in both TableCache and DB List fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 1000); + keyManager.listStatus(rootDirArgs, true, "", 1000, null); Assert.assertEquals(100, fileStatuses.size()); // Get entries with startKey=prefixKeyInDB fileStatuses = - keyManager.listStatus(rootDirArgs, true, prefixKeyInDB, 1000); + keyManager.listStatus(rootDirArgs, true, prefixKeyInDB, 1000, null); Assert.assertEquals(50, fileStatuses.size()); // Get entries with startKey=prefixKeyInCache fileStatuses = - keyManager.listStatus(rootDirArgs, true, prefixKeyInCache, 1000); + keyManager.listStatus(rootDirArgs, true, prefixKeyInCache, 1000, null); Assert.assertEquals(100, fileStatuses.size()); // Clean up cache by marking those keys in cache as deleted @@ -865,12 +876,12 @@ public void testListStatusWithTableCacheRecursive() throws Exception { OmKeyArgs rootDirArgs = createKeyArgs(""); // Test listStatus with recursive=false, should only have dirs under root List fileStatuses = - keyManager.listStatus(rootDirArgs, false, "", 1000); + keyManager.listStatus(rootDirArgs, false, "", 1000, null); Assert.assertEquals(2, fileStatuses.size()); // Test listStatus with recursive=true, should have dirs under root and fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 1000); + keyManager.listStatus(rootDirArgs, true, "", 1000, null); Assert.assertEquals(3, fileStatuses.size()); // Add a total of 10 key entries to DB and TableCache under dir1 @@ -894,12 +905,12 @@ public void testListStatusWithTableCacheRecursive() throws Exception { // Test non-recursive, should return the dir under root fileStatuses = - keyManager.listStatus(rootDirArgs, false, "", 1000); + keyManager.listStatus(rootDirArgs, false, "", 1000, null); Assert.assertEquals(2, fileStatuses.size()); // Test recursive, should return the dir and the keys in it fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 1000); + keyManager.listStatus(rootDirArgs, true, "", 1000, null); Assert.assertEquals(10 + 3, fileStatuses.size()); // Clean up @@ -944,12 +955,12 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { OmKeyArgs rootDirArgs = createKeyArgs(""); List fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 1000); + 
keyManager.listStatus(rootDirArgs, true, "", 1000, null); // Should only get entries that are not marked as deleted. Assert.assertEquals(50, fileStatuses.size()); // Test startKey fileStatuses = - keyManager.listStatus(rootDirArgs, true, prefixKey, 1000); + keyManager.listStatus(rootDirArgs, true, prefixKey, 1000, null); // Should only get entries that are not marked as deleted. Assert.assertEquals(50, fileStatuses.size()); // Verify result @@ -981,7 +992,7 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { existKeySet.removeAll(deletedKeySet); fileStatuses = keyManager.listStatus( - rootDirArgs, true, "", 1000); + rootDirArgs, true, "", 1000, null); // Should only get entries that are not marked as deleted. Assert.assertEquals(50 / 2, fileStatuses.size()); @@ -1000,7 +1011,7 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { expectedKeys.clear(); do { fileStatuses = keyManager.listStatus( - rootDirArgs, true, startKey, batchSize); + rootDirArgs, true, startKey, batchSize, null); // Note fileStatuses will never be empty since we are using the last // keyName as the startKey of next batch, // the startKey itself will show up in the next batch of results. @@ -1048,11 +1059,11 @@ public void testListStatus() throws IOException { OmKeyArgs rootDirArgs = createKeyArgs(""); List fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 100); + keyManager.listStatus(rootDirArgs, true, "", 100, null); // verify the number of status returned is same as number of entries Assert.assertEquals(numEntries, fileStatuses.size()); - fileStatuses = keyManager.listStatus(rootDirArgs, false, "", 100); + fileStatuses = keyManager.listStatus(rootDirArgs, false, "", 100, null); // the number of immediate children of root is 1 Assert.assertEquals(1, fileStatuses.size()); @@ -1060,19 +1071,19 @@ public void testListStatus() throws IOException { // return all the entries. String startKey = children.iterator().next(); fileStatuses = keyManager.listStatus(rootDirArgs, true, - startKey.substring(0, startKey.length() - 1), 100); + startKey.substring(0, startKey.length() - 1), 100, null); Assert.assertEquals(numEntries, fileStatuses.size()); for (String directory : directorySet) { // verify status list received for each directory with recursive flag set // to false OmKeyArgs dirArgs = createKeyArgs(directory); - fileStatuses = keyManager.listStatus(dirArgs, false, "", 100); + fileStatuses = keyManager.listStatus(dirArgs, false, "", 100, null); verifyFileStatus(directory, fileStatuses, directorySet, fileSet, false); // verify status list received for each directory with recursive flag set // to true - fileStatuses = keyManager.listStatus(dirArgs, true, "", 100); + fileStatuses = keyManager.listStatus(dirArgs, true, "", 100, null); verifyFileStatus(directory, fileStatuses, directorySet, fileSet, true); // verify list status call with using the startKey parameter and @@ -1086,7 +1097,7 @@ public void testListStatus() throws IOException { tempFileStatus != null ? 
tempFileStatus.get(tempFileStatus.size() - 1).getKeyInfo() .getKeyName() : null, - 2); + 2, null); tmpStatusSet.addAll(tempFileStatus); } while (tempFileStatus.size() == 2); verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet, @@ -1104,7 +1115,7 @@ public void testListStatus() throws IOException { tempFileStatus.get(tempFileStatus.size() - 1).getKeyInfo() .getKeyName() : null, - 2); + 2, null); tmpStatusSet.addAll(tempFileStatus); } while (tempFileStatus.size() == 2); verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index ea422e1d731e..ef08abd89096 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -18,10 +18,12 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.UUID; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -33,21 +35,29 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.util.ExitManager; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.ratis.server.protocol.TermIndex; -import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithData.createKey; -import org.apache.ratis.server.protocol.TermIndex; +import static org.junit.Assert.assertTrue; + import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.event.Level; /** * Tests the Ratis snaphsots feature in OM. 
*/ +@Ignore("HDDS-3966") public class TestOMRatisSnapshots { private MiniOzoneHAClusterImpl cluster = null; @@ -57,6 +67,10 @@ public class TestOMRatisSnapshots { private String scmId; private String omServiceId; private int numOfOMs = 3; + private OzoneBucket ozoneBucket; + private String volumeName; + private String bucketName; + private static final long SNAPSHOT_THRESHOLD = 50; private static final int LOG_PURGE_GAP = 50; @@ -93,6 +107,20 @@ public void init() throws Exception { cluster.waitForClusterToBeReady(); objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) .getObjectStore(); + + volumeName = "volume" + RandomStringUtils.randomNumeric(5); + bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + + VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() + .setOwner("user" + RandomStringUtils.randomNumeric(5)) + .setAdmin("admin" + RandomStringUtils.randomNumeric(5)) + .build(); + + objectStore.createVolume(volumeName, createVolumeArgs); + OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); + + retVolumeinfo.createBucket(bucketName); + ozoneBucket = retVolumeinfo.getBucket(bucketName); } /** @@ -123,37 +151,13 @@ public void testInstallSnapshot() throws Exception { OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); // Do some transactions so that the log index increases - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - objectStore.createVolume(volumeName, createVolumeArgs); - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - retVolumeinfo.createBucket(bucketName); - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); - - long leaderOMappliedLogIndex = - leaderRatisServer.getLastAppliedTermIndex().getIndex(); - - List keys = new ArrayList<>(); - while (leaderOMappliedLogIndex < 2000) { - keys.add(createKey(ozoneBucket)); - leaderOMappliedLogIndex = - leaderRatisServer.getLastAppliedTermIndex().getIndex(); - } + List keys = writeKeysToIncreaseLogIndex(leaderRatisServer, 200); // Get the latest db checkpoint from the leader OM. OMTransactionInfo omTransactionInfo = OMTransactionInfo.readTransactionInfo(leaderOM.getMetadataManager()); TermIndex leaderOMTermIndex = - TermIndex.newTermIndex(omTransactionInfo.getCurrentTerm(), + TermIndex.newTermIndex(omTransactionInfo.getTerm(), omTransactionInfo.getTransactionIndex()); long leaderOMSnaphsotIndex = leaderOMTermIndex.getIndex(); long leaderOMSnapshotTermIndex = leaderOMTermIndex.getTerm(); @@ -167,30 +171,20 @@ public void testInstallSnapshot() throws Exception { // The recently started OM should be lagging behind the leader OM. long followerOMLastAppliedIndex = followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex(); - Assert.assertTrue( + assertTrue( followerOMLastAppliedIndex < leaderOMSnaphsotIndex); // Install leader OM's db checkpoint on the lagging OM. 
- File oldDbLocation = followerOM.getMetadataManager().getStore() - .getDbLocation(); - followerOM.getOmRatisServer().getOmStateMachine().pause(); - followerOM.getMetadataManager().getStore().close(); - followerOM.replaceOMDBWithCheckpoint(leaderOMSnaphsotIndex, oldDbLocation, - leaderDbCheckpoint.getCheckpointLocation()); - - // Reload the follower OM with new DB checkpoint from the leader OM. - followerOM.reloadOMState(leaderOMSnaphsotIndex, leaderOMSnapshotTermIndex); - followerOM.getOmRatisServer().getOmStateMachine().unpause( - leaderOMSnaphsotIndex, leaderOMSnapshotTermIndex); - - // After the new checkpoint is loaded and state machine is unpaused, the - // follower OM lastAppliedIndex must match the snapshot index of the - // checkpoint. + followerOM.installCheckpoint(leaderOMNodeId, leaderDbCheckpoint); + + // After the new checkpoint is installed, the follower OM + // lastAppliedIndex must >= the snapshot index of the checkpoint. It + // could be great than snapshot index if there is any conf entry from ratis. followerOMLastAppliedIndex = followerOM.getOmRatisServer() .getLastAppliedTermIndex().getIndex(); - Assert.assertEquals(leaderOMSnaphsotIndex, followerOMLastAppliedIndex); - Assert.assertEquals(leaderOMSnapshotTermIndex, - followerOM.getOmRatisServer().getLastAppliedTermIndex().getTerm()); + assertTrue(followerOMLastAppliedIndex >= leaderOMSnaphsotIndex); + assertTrue(followerOM.getOmRatisServer().getLastAppliedTermIndex() + .getTerm() >= leaderOMSnapshotTermIndex); // Verify that the follower OM's DB contains the transactions which were // made while it was inactive. @@ -204,4 +198,133 @@ public void testInstallSnapshot() throws Exception { followerOMMetaMngr.getOzoneKey(volumeName, bucketName, key))); } } + + @Test + public void testInstallOldCheckpointFailure() throws Exception { + // Get the leader OM + String leaderOMNodeId = OmFailoverProxyUtil + .getFailoverProxyProvider(objectStore.getClientProxy()) + .getCurrentProxyOMNodeId(); + + OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId); + + // Find the inactive OM and start it + String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId(); + if (cluster.isOMActive(followerNodeId)) { + followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId(); + } + cluster.startInactiveOM(followerNodeId); + + OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); + OzoneManagerRatisServer followerRatisServer = followerOM.getOmRatisServer(); + + // Do some transactions so that the log index increases on follower OM + writeKeysToIncreaseLogIndex(followerRatisServer, 100); + + TermIndex leaderCheckpointTermIndex = leaderOM.getOmRatisServer() + .getLastAppliedTermIndex(); + DBCheckpoint leaderDbCheckpoint = leaderOM.getMetadataManager().getStore() + .getCheckpoint(false); + + // Do some more transactions to increase the log index further on + // follower OM such that it is more than the checkpoint index taken on + // leader OM. + writeKeysToIncreaseLogIndex(followerOM.getOmRatisServer(), + leaderCheckpointTermIndex.getIndex() + 100); + + GenericTestUtils.setLogLevel(OzoneManager.LOG, Level.INFO); + GenericTestUtils.LogCapturer logCapture = + GenericTestUtils.LogCapturer.captureLogs(OzoneManager.LOG); + + // Install the old checkpoint on the follower OM. This should fail as the + // followerOM is already ahead of that transactionLogIndex and the OM + // state should be reloaded. 
+ TermIndex followerTermIndex = followerRatisServer.getLastAppliedTermIndex(); + TermIndex newTermIndex = followerOM.installCheckpoint( + leaderOMNodeId, leaderDbCheckpoint); + + String errorMsg = "Cannot proceed with InstallSnapshot as OM is at " + + "TermIndex " + followerTermIndex + " and checkpoint has lower " + + "TermIndex"; + Assert.assertTrue(logCapture.getOutput().contains(errorMsg)); + Assert.assertNull("OM installed checkpoint even though checkpoint " + + "logIndex is less than it's lastAppliedIndex", newTermIndex); + Assert.assertEquals(followerTermIndex, + followerRatisServer.getLastAppliedTermIndex()); + } + + @Test + public void testInstallCorruptedCheckpointFailure() throws Exception { + // Get the leader OM + String leaderOMNodeId = OmFailoverProxyUtil + .getFailoverProxyProvider(objectStore.getClientProxy()) + .getCurrentProxyOMNodeId(); + + OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId); + OzoneManagerRatisServer leaderRatisServer = leaderOM.getOmRatisServer(); + + // Find the inactive OM + String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId(); + if (cluster.isOMActive(followerNodeId)) { + followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId(); + } + OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); + OzoneManagerRatisServer followerRatisServer = followerOM.getOmRatisServer(); + + // Do some transactions so that the log index increases + writeKeysToIncreaseLogIndex(leaderRatisServer, 100); + + DBCheckpoint leaderDbCheckpoint = leaderOM.getMetadataManager().getStore() + .getCheckpoint(false); + Path leaderCheckpointLocation = leaderDbCheckpoint.getCheckpointLocation(); + OMTransactionInfo leaderCheckpointTrxnInfo = OzoneManagerRatisUtils + .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation); + + // Corrupt the leader checkpoint and install that on the OM. The + // operation should fail and OM should shutdown. 
+ boolean delete = true; + for (File file : leaderCheckpointLocation.toFile() + .listFiles()) { + if (file.getName().contains(".sst")) { + if (delete) { + file.delete(); + delete = false; + } else { + delete = true; + } + } + } + + GenericTestUtils.setLogLevel(OzoneManager.LOG, Level.ERROR); + GenericTestUtils.LogCapturer logCapture = + GenericTestUtils.LogCapturer.captureLogs(OzoneManager.LOG); + followerOM.setExitManagerForTesting(new DummyExitManager()); + + followerOM.installCheckpoint(leaderOMNodeId, leaderCheckpointLocation, + leaderCheckpointTrxnInfo); + + Assert.assertTrue(logCapture.getOutput().contains("System Exit: " + + "Failed to reload OM state and instantiate services.")); + } + + private List writeKeysToIncreaseLogIndex( + OzoneManagerRatisServer omRatisServer, long targetLogIndex) + throws IOException, InterruptedException { + List keys = new ArrayList<>(); + long logIndex = omRatisServer.getLastAppliedTermIndex().getIndex(); + while (logIndex < targetLogIndex) { + keys.add(createKey(ozoneBucket)); + Thread.sleep(100); + logIndex = omRatisServer.getLastAppliedTermIndex().getIndex(); + } + return keys; + } + + private class DummyExitManager extends ExitManager { + @Override + public void exitSystem(int status, String message, Throwable throwable, + Logger log) { + log.error("System Exit: " + message, throwable); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java new file mode 100644 index 000000000000..450eebb3a449 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om; + + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.debug.DBScanner; +import org.apache.hadoop.ozone.debug.RDBParser; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.Assert; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.util.List; +import java.util.ArrayList; + + +/** + * This class tests the Debug LDB CLI that reads from an om.db file. + */ +public class TestOmLDBCli { + private OzoneConfiguration conf; + + private RDBParser rdbParser; + private DBScanner dbScanner; + private DBStore dbStore = null; + private static List keyNames; + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + @Before + public void setup() throws Exception { + conf = new OzoneConfiguration(); + rdbParser = new RDBParser(); + dbScanner = new DBScanner(); + keyNames = new ArrayList<>(); + } + + @After + public void shutdown() throws Exception { + if (dbStore!=null){ + dbStore.close(); + } + } + + @Test + public void testOMDB() throws Exception { + File newFolder = folder.newFolder(); + if(!newFolder.exists()) { + Assert.assertTrue(newFolder.mkdirs()); + } + // Dummy om.db with only keyTable + dbStore = DBStoreBuilder.newBuilder(conf) + .setName("om.db") + .setPath(newFolder.toPath()) + .addTable("keyTable") + .build(); + // insert 5 keys + for (int i = 0; i<5; i++) { + OmKeyInfo value = TestOMRequestUtils.createOmKeyInfo("sampleVol", + "sampleBuck", "key" + (i+1), HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE); + String key = "key"+ (i); + Table keyTable = dbStore.getTable("keyTable"); + keyTable.put(key.getBytes(), value.getProtobuf().toByteArray()); + } + rdbParser.setDbPath(dbStore.getDbLocation().getAbsolutePath()); + dbScanner.setParent(rdbParser); + Assert.assertEquals(5, getKeyNames(dbScanner).size()); + Assert.assertTrue(getKeyNames(dbScanner).contains("key1")); + Assert.assertTrue(getKeyNames(dbScanner).contains("key5")); + Assert.assertFalse(getKeyNames(dbScanner).contains("key6")); + DBScanner.setLimit(1); + Assert.assertEquals(1, getKeyNames(dbScanner).size()); + DBScanner.setLimit(-1); + try { + getKeyNames(dbScanner); + Assert.fail("IllegalArgumentException is expected"); + }catch (IllegalArgumentException e){ + //ignore + } + } + + private static List getKeyNames(DBScanner dbScanner) + throws Exception { + keyNames.clear(); + dbScanner.setTableName("keyTable"); + dbScanner.call(); + Assert.assertFalse(dbScanner.getScannedObjects().isEmpty()); + for (Object o : dbScanner.getScannedObjects()){ + OmKeyInfo keyInfo = (OmKeyInfo)o; + keyNames.add(keyInfo.getKeyName()); + } + return keyNames; + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index d4594ef69498..a53a7588ef40 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -19,11 +19,11 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyLong; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; +import java.util.Collections; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.client.BlockID; @@ -32,6 +32,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.junit.After; @@ -44,7 +45,6 @@ /** * Test for OM metrics. */ -@SuppressWarnings("deprecation") public class TestOmMetrics { /** @@ -62,8 +62,6 @@ public class TestOmMetrics { /** * Create a MiniDFSCluster for testing. - * - * @throws IOException */ @Before public void setup() throws Exception { @@ -113,7 +111,7 @@ public void testVolumeOps() throws IOException { assertCounter("NumVolumeCheckAccesses", 1L, omMetrics); assertCounter("NumVolumeDeletes", 1L, omMetrics); assertCounter("NumVolumeLists", 1L, omMetrics); - assertCounter("NumVolumes", 0L, omMetrics); + assertCounter("NumVolumes", 1L, omMetrics); ozoneManager.createVolume(null); ozoneManager.createVolume(null); @@ -121,7 +119,9 @@ public void testVolumeOps() throws IOException { ozoneManager.deleteVolume(null); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumes", 2L, omMetrics); + + // Accounting 's3v' volume which is created by default. + assertCounter("NumVolumes", 3L, omMetrics); // inject exception to test for Failure Metrics @@ -154,10 +154,10 @@ public void testVolumeOps() throws IOException { // As last call for volumesOps does not increment numVolumes as those are // failed. 
- assertCounter("NumVolumes", 2L, omMetrics); + assertCounter("NumVolumes", 3L, omMetrics); cluster.restartOzoneManager(); - assertCounter("NumVolumes", 2L, omMetrics); + assertCounter("NumVolumes", 3L, omMetrics); } @@ -233,20 +233,28 @@ public void testKeyOps() throws IOException { KeyManager keyManager = (KeyManager) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); KeyManager mockKm = Mockito.spy(keyManager); - - Mockito.doReturn(null).when(mockKm).openKey(null); - Mockito.doNothing().when(mockKm).deleteKey(null); - Mockito.doReturn(null).when(mockKm).lookupKey(null, ""); - Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0); - Mockito.doReturn(null).when(mockKm).listTrash( - null, null, null, null, 0); - Mockito.doNothing().when(mockKm).commitKey(any(OmKeyArgs.class), anyLong()); - Mockito.doReturn(null).when(mockKm).initiateMultipartUpload( - any(OmKeyArgs.class)); + BucketManager mockBm = Mockito.mock(BucketManager.class); + + OmBucketInfo mockBucket = OmBucketInfo.newBuilder() + .setVolumeName("").setBucketName("") + .build(); + Mockito.when(mockBm.getBucketInfo(any(), any())).thenReturn(mockBucket); + Mockito.doReturn(null).when(mockKm).openKey(any()); + Mockito.doNothing().when(mockKm).deleteKey(any()); + Mockito.doReturn(null).when(mockKm).lookupKey(any(), any()); + Mockito.doReturn(null).when(mockKm).listKeys(any(), any(), any(), any(), + anyInt()); + Mockito.doReturn(null).when(mockKm).listTrash(any(), any(), any(), any(), + anyInt()); + Mockito.doNothing().when(mockKm).commitKey(any(), anyLong()); + Mockito.doReturn(null).when(mockKm).initiateMultipartUpload(any()); + HddsWhiteboxTestUtils.setInternalState( + ozoneManager, "bucketManager", mockBm); HddsWhiteboxTestUtils.setInternalState( ozoneManager, "keyManager", mockKm); - doKeyOps(); + OmKeyArgs keyArgs = createKeyArgs(); + doKeyOps(keyArgs); MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); assertCounter("NumKeyOps", 7L, omMetrics); @@ -259,34 +267,32 @@ public void testKeyOps() throws IOException { assertCounter("NumInitiateMultipartUploads", 1L, omMetrics); - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.deleteKey(null); + ozoneManager.openKey(keyArgs); + ozoneManager.commitKey(keyArgs, 0); + ozoneManager.openKey(keyArgs); + ozoneManager.commitKey(keyArgs, 0); + ozoneManager.openKey(keyArgs); + ozoneManager.commitKey(keyArgs, 0); + ozoneManager.deleteKey(keyArgs); omMetrics = getMetrics("OMMetrics"); assertCounter("NumKeys", 2L, omMetrics); // inject exception to test for Failure Metrics - Mockito.doThrow(exception).when(mockKm).openKey(null); - Mockito.doThrow(exception).when(mockKm).deleteKey(null); - Mockito.doThrow(exception).when(mockKm).lookupKey(null, ""); + Mockito.doThrow(exception).when(mockKm).openKey(any()); + Mockito.doThrow(exception).when(mockKm).deleteKey(any()); + Mockito.doThrow(exception).when(mockKm).lookupKey(any(), any()); Mockito.doThrow(exception).when(mockKm).listKeys( - null, null, null, null, 0); + any(), any(), any(), any(), anyInt()); Mockito.doThrow(exception).when(mockKm).listTrash( - null, null, null, null, 0); - Mockito.doThrow(exception).when(mockKm).commitKey(any(OmKeyArgs.class), - anyLong()); - Mockito.doThrow(exception).when(mockKm).initiateMultipartUpload( - any(OmKeyArgs.class)); + any(), any(), any(), any(), anyInt()); + 
Mockito.doThrow(exception).when(mockKm).commitKey(any(), anyLong()); + Mockito.doThrow(exception).when(mockKm).initiateMultipartUpload(any()); HddsWhiteboxTestUtils.setInternalState( ozoneManager, "keyManager", mockKm); - doKeyOps(); + doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); assertCounter("NumKeyOps", 21L, omMetrics); @@ -380,39 +386,39 @@ private void doBucketOps() { /** * Test key operations with ignoring thrown exception. */ - private void doKeyOps() { + private void doKeyOps(OmKeyArgs keyArgs) { try { - ozoneManager.openKey(null); + ozoneManager.openKey(keyArgs); } catch (IOException ignored) { } try { - ozoneManager.deleteKey(null); + ozoneManager.deleteKey(keyArgs); } catch (IOException ignored) { } try { - ozoneManager.lookupKey(null); + ozoneManager.lookupKey(keyArgs); } catch (IOException ignored) { } try { - ozoneManager.listKeys(null, null, null, null, 0); + ozoneManager.listKeys("", "", null, null, 0); } catch (IOException ignored) { } try { - ozoneManager.listTrash(null, null, null, null, 0); + ozoneManager.listTrash("", "", null, null, 0); } catch (IOException ignored) { } try { - ozoneManager.commitKey(createKeyArgs(), 0); + ozoneManager.commitKey(keyArgs, 0); } catch (IOException ignored) { } try { - ozoneManager.initiateMultipartUpload(null); + ozoneManager.initiateMultipartUpload(keyArgs); } catch (IOException ignored) { } @@ -420,12 +426,12 @@ private void doKeyOps() { private OmKeyArgs createKeyArgs() { OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(new ContainerBlockID(1, 1))).build(); + .setBlockID(new BlockID(new ContainerBlockID(1, 1))) + .build(); keyLocationInfo.setCreateVersion(0); - List omKeyLocationInfoList = new ArrayList<>(); - omKeyLocationInfoList.add(keyLocationInfo); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setLocationInfoList( - omKeyLocationInfoList).build(); - return keyArgs; + + return new OmKeyArgs.Builder() + .setLocationInfoList(Collections.singletonList(keyLocationInfo)) + .build(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java deleted file mode 100644 index a082e995afa1..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.UUID; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.scm.cli.SQLCLI; -import org.apache.hadoop.test.GenericTestUtils; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; -import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * This class tests the CLI that transforms om.db into SQLite DB files. - */ -@RunWith(Parameterized.class) -public class TestOmSQLCli { - - /** - * Set a timeout for each test. - */ - @Rule - public Timeout timeout = new Timeout(300000); - private MiniOzoneCluster cluster = null; - - private OzoneConfiguration conf; - private SQLCLI cli; - - private String userName = "userTest"; - private String adminName = "adminTest"; - private String volumeName0 = "volumeTest0"; - private String volumeName1 = "volumeTest1"; - private String bucketName0 = "bucketTest0"; - private String bucketName1 = "bucketTest1"; - private String bucketName2 = "bucketTest2"; - private String keyName0 = "key0"; - private String keyName1 = "key1"; - private String keyName2 = "key2"; - private String keyName3 = "key3"; - - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - // Uncomment the below line if we support leveldb in future. - //{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB} - }); - } - - private String metaStoreType; - - public TestOmSQLCli(String type) { - metaStoreType = type; - } - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @Before - public void setup() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - OzoneBucket bucket0 = - TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName0); - OzoneBucket bucket1 = - TestDataUtil.createVolumeAndBucket(cluster, volumeName1, bucketName1); - OzoneBucket bucket2 = - TestDataUtil.createVolumeAndBucket(cluster, volumeName0, bucketName2); - - TestDataUtil.createKey(bucket0, keyName0, ""); - TestDataUtil.createKey(bucket1, keyName1, ""); - TestDataUtil.createKey(bucket2, keyName2, ""); - TestDataUtil.createKey(bucket2, keyName3, ""); - - cluster.getOzoneManager().stop(); - cluster.getStorageContainerManager().stop(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType); - cli = new SQLCLI(conf); - } - - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - // After HDDS-357, we have to fix SQLCli. - // TODO: fix SQLCli - @Ignore - @Test - public void testOmDB() throws Exception { - String dbOutPath = GenericTestUtils.getTempPath( - UUID.randomUUID() + "/out_sql.db"); - - String dbRootPath = conf.get(HddsConfigKeys.OZONE_METADATA_DIRS); - String dbPath = dbRootPath + "/" + OM_DB_NAME; - String[] args = {"-p", dbPath, "-o", dbOutPath}; - - cli.run(args); - - Connection conn = connectDB(dbOutPath); - String sql = "SELECT * FROM volumeList"; - ResultSet rs = executeQuery(conn, sql); - List expectedValues = - new ArrayList<>(Arrays.asList(volumeName0, volumeName1)); - while (rs.next()) { - String userNameRs = rs.getString("userName"); - String volumeNameRs = rs.getString("volumeName"); - assertEquals(userName, userNameRs.substring(1)); - assertTrue(expectedValues.remove(volumeNameRs)); - } - assertEquals(0, expectedValues.size()); - - sql = "SELECT * FROM volumeInfo"; - rs = executeQuery(conn, sql); - expectedValues = - new ArrayList<>(Arrays.asList(volumeName0, volumeName1)); - while (rs.next()) { - String adName = rs.getString("adminName"); - String ownerName = rs.getString("ownerName"); - String volumeName = rs.getString("volumeName"); - assertEquals(adminName, adName); - assertEquals(userName, ownerName); - assertTrue(expectedValues.remove(volumeName)); - } - assertEquals(0, expectedValues.size()); - - sql = "SELECT * FROM aclInfo"; - rs = executeQuery(conn, sql); - expectedValues = - new ArrayList<>(Arrays.asList(volumeName0, volumeName1)); - while (rs.next()) { - String adName = rs.getString("adminName"); - String ownerName = rs.getString("ownerName"); - String volumeName = rs.getString("volumeName"); - String type = rs.getString("type"); - String uName = rs.getString("userName"); - String rights = rs.getString("rights"); - assertEquals(adminName, adName); - assertEquals(userName, ownerName); - assertEquals("USER", type); - assertEquals(userName, uName); - assertEquals("READ_WRITE", rights); - assertTrue(expectedValues.remove(volumeName)); - } - assertEquals(0, expectedValues.size()); - - sql = "SELECT * FROM bucketInfo"; - rs = executeQuery(conn, sql); - HashMap expectedMap = new HashMap<>(); - expectedMap.put(bucketName0, volumeName0); - expectedMap.put(bucketName2, volumeName0); - expectedMap.put(bucketName1, volumeName1); - while (rs.next()) { - String volumeName = rs.getString("volumeName"); - String bucketName = rs.getString("bucketName"); - boolean versionEnabled = rs.getBoolean("versionEnabled"); - String 
storegeType = rs.getString("storageType"); - assertEquals(volumeName, expectedMap.remove(bucketName)); - assertFalse(versionEnabled); - assertEquals("DISK", storegeType); - } - assertEquals(0, expectedMap.size()); - - sql = "SELECT * FROM keyInfo"; - rs = executeQuery(conn, sql); - HashMap> expectedMap2 = new HashMap<>(); - // no data written, data size will be 0 - expectedMap2.put(keyName0, - Arrays.asList(volumeName0, bucketName0, "0")); - expectedMap2.put(keyName1, - Arrays.asList(volumeName1, bucketName1, "0")); - expectedMap2.put(keyName2, - Arrays.asList(volumeName0, bucketName2, "0")); - expectedMap2.put(keyName3, - Arrays.asList(volumeName0, bucketName2, "0")); - while (rs.next()) { - String volumeName = rs.getString("volumeName"); - String bucketName = rs.getString("bucketName"); - String keyName = rs.getString("keyName"); - int dataSize = rs.getInt("dataSize"); - List vals = expectedMap2.remove(keyName); - assertNotNull(vals); - assertEquals(vals.get(0), volumeName); - assertEquals(vals.get(1), bucketName); - assertEquals(vals.get(2), Integer.toString(dataSize)); - } - assertEquals(0, expectedMap2.size()); - - conn.close(); - Files.delete(Paths.get(dbOutPath)); - } - - private ResultSet executeQuery(Connection conn, String sql) - throws SQLException { - Statement stmt = conn.createStatement(); - return stmt.executeQuery(sql); - } - - private Connection connectDB(String dbPath) throws Exception { - Class.forName("org.sqlite.JDBC"); - String connectPath = - String.format("jdbc:sqlite:%s", dbPath); - return DriverManager.getConnection(connectPath); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java index 646b91571260..aed84f5dd604 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java @@ -50,7 +50,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_DELETE; import static org.junit.Assert.fail; /** @@ -187,8 +187,9 @@ public void testKeysDelete() throws Exception { ozoneBucket.deleteKeys(keyList2); fail("testFilesDelete"); } catch (OMException ex) { - // The expected exception KEY_NOT_FOUND. - Assert.assertEquals(KEY_NOT_FOUND, ex.getResult()); + // The expected exception is PARTIAL_DELETE: if some keys cannot be + // deleted, the error code PARTIAL_DELETE is returned.
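Since PARTIAL_DELETE is a new result code for batch deletes, callers have to treat it separately from KEY_NOT_FOUND. A caller-side sketch (not taken from the patch; variable names are illustrative):

    try {
      ozoneBucket.deleteKeys(keysToDelete);
    } catch (OMException e) {
      if (e.getResult() == OMException.ResultCodes.PARTIAL_DELETE) {
        // Some keys were deleted; the rest are reported back as unDeletedKeys
        // in the response, and the request as a whole surfaces this code.
        LOG.warn("Batch delete partially succeeded: {}", e.getMessage());
      } else {
        throw e;
      }
    }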
+ Assert.assertEquals(PARTIAL_DELETE, ex.getResult()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java index a8b1eeff53ba..d7aaf37fd637 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java @@ -203,14 +203,16 @@ public void testListVolumeWithOtherUsersListAllAllowed() throws Exception { UserGroupInformation.setLoginUser(user1); checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"), true); + + // Add "s3v" created default by OM. checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), true); + "volume4", "volume5", "s3v"), true); UserGroupInformation.setLoginUser(user2); checkUser(cluster, user1, Arrays.asList("volume1", "volume4", "volume5"), true); checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), true); + "volume4", "volume5", "s3v"), true); stopCluster(cluster); } @@ -229,8 +231,9 @@ public void testListVolumeWithOtherUsersListAllDisallowed() throws Exception { UserGroupInformation.setLoginUser(user1); checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"), false); + // Add "s3v" created default by OM. checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), false); + "volume4", "volume5", "s3v"), false); // While admin should be able to list volumes just fine. UserGroupInformation.setLoginUser(adminUser); @@ -250,8 +253,10 @@ public void testAclEnabledListAllAllowed() throws Exception { true); checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"), true); + + // Add "s3v" created default by OM. checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), true); + "volume4", "volume5", "s3v"), true); stopCluster(cluster); } @@ -268,8 +273,9 @@ public void testAclEnabledListAllDisallowed() throws Exception { checkUser(cluster, user2, Arrays.asList("volume2", "volume3", "volume5"), false); UserGroupInformation.setLoginUser(adminUser); - checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", "volume3", - "volume4", "volume5"), true); + // Add "s3v" created default by OM. 
+ checkUser(cluster, adminUser, Arrays.asList("volume1", "volume2", + "volume3", "volume4", "volume5", "s3v"), true); stopCluster(cluster); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java index 6058fad61d2b..8938cfa48691 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java @@ -41,11 +41,10 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; import static org.junit.Assert.fail; -import org.junit.Before; -import org.junit.Ignore; +import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; @@ -54,16 +53,15 @@ * Test some client operations after cluster starts. And perform restart and * then performs client operations and check the behavior is expected or not. */ -@Ignore public class TestOzoneManagerRestart { - private MiniOzoneCluster cluster = null; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omId; + private static MiniOzoneCluster cluster = null; + private static OzoneConfiguration conf; + private static String clusterId; + private static String scmId; + private static String omId; @Rule - public Timeout timeout = new Timeout(60000); + public Timeout timeout = new Timeout(240000); /** * Create a MiniDFSCluster for testing. @@ -72,8 +70,8 @@ public class TestOzoneManagerRestart { * * @throws IOException */ - @Before - public void init() throws Exception { + @BeforeClass + public static void init() throws Exception { conf = new OzoneConfiguration(); clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); @@ -94,8 +92,8 @@ public void init() throws Exception { /** * Shutdown MiniDFSCluster. 
*/ - @After - public void shutdown() { + @AfterClass + public static void shutdown() { if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 6c858abe7032..d3e228f09c84 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.TestStorageContainerManagerHelper; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -289,8 +288,6 @@ public void testSCMSafeMode() throws Exception { @Test(timeout = 300_000) public void testSCMSafeModeRestrictedOp() throws Exception { - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB); cluster.stop(); cluster = builder.build(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index d77f4d9d1341..844c859ac028 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -20,6 +20,7 @@ import java.util.UUID; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -31,11 +32,10 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; - -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; + import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -124,7 +124,7 @@ public void testDownloadCheckpoint() throws Exception { .getOzoneManagerDBSnapshot(leaderOMNodeId); long leaderSnapshotIndex = leaderOM.getRatisSnapshotIndex(); - long downloadedSnapshotIndex = getDownloadSnapshotIndex(omSnapshot); + long downloadedSnapshotIndex = getDownloadedSnapshotIndex(omSnapshot); // The snapshot index downloaded from leader OM should match the ratis // snapshot index on the leader OM @@ -133,21 +133,13 @@ public void testDownloadCheckpoint() throws Exception { leaderSnapshotIndex, downloadedSnapshotIndex); } - private long getDownloadSnapshotIndex(DBCheckpoint dbCheckpoint) + private long getDownloadedSnapshotIndex(DBCheckpoint dbCheckpoint) throws Exception { - OzoneConfiguration configuration = new OzoneConfiguration(conf); - configuration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - 
dbCheckpoint.getCheckpointLocation().getParent().toString()); - - OmMetadataManagerImpl omMetadataManager = - new OmMetadataManagerImpl(configuration); - - long transactionIndex = - OMTransactionInfo.readTransactionInfo(omMetadataManager) - .getTransactionIndex(); - omMetadataManager.stop(); - return transactionIndex; + OMTransactionInfo trxnInfoFromCheckpoint = + OzoneManagerRatisUtils.getTrxnInfoFromCheckpoint(conf, + dbCheckpoint.getCheckpointLocation()); + return trxnInfoFromCheckpoint.getTransactionIndex(); } } \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index 93630548e08a..af9200103d99 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -40,8 +40,8 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -335,7 +335,7 @@ private LinkedTreeMap getContainerResponseMap(String containerResponse, */ private void addKeys(int start, int end) throws Exception { for(int i = start; i < end; i++) { - Pipeline pipeline = getRandomPipeline(); + Pipeline pipeline = TestUtils.getRandomPipeline(); List omKeyLocationInfoList = new ArrayList<>(); BlockID blockID = new BlockID(i, 1); OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID, @@ -358,16 +358,6 @@ private long getTableKeyCount(TableIterator - dfs.container.ratis.num.write.chunk.threads + dfs.container.ratis.num.write.chunk.threads.per.volume 4 diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 9a5c5abc7d05..dc6de8c51331 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-interface-client - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Client interface Apache Hadoop Ozone Client Interface jar diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index ba193c70d90c..c6e2949122c1 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -60,6 +60,7 @@ enum Type { CommitKey = 36; AllocateBlock = 37; DeleteKeys = 38; + RenameKeys = 39; InitiateMultiPartUpload = 45; CommitMultiPartUpload = 46; @@ -126,6 +127,7 @@ message OMRequest { optional CommitKeyRequest commitKeyRequest = 36; optional AllocateBlockRequest allocateBlockRequest = 37; optional DeleteKeysRequest deleteKeysRequest = 38; + optional RenameKeysRequest renameKeysRequest = 39; optional MultipartInfoInitiateRequest initiateMultiPartUploadRequest = 45; optional MultipartCommitUploadPartRequest commitMultiPartUploadRequest = 46; @@ -198,6 +200,7 @@ message 
OMResponse { optional CommitKeyResponse commitKeyResponse = 36; optional AllocateBlockResponse allocateBlockResponse = 37; optional DeleteKeysResponse deleteKeysResponse = 38; + optional RenameKeysResponse renameKeysResponse = 39; optional MultipartInfoInitiateResponse initiateMultiPartUploadResponse = 45; optional MultipartCommitUploadPartResponse commitMultiPartUploadResponse = 46; @@ -303,8 +306,14 @@ enum Status { INVALID_VOLUME_NAME = 61; - // When transactions are replayed - REPLAY = 100; + PARTIAL_DELETE = 62; + + DETECTED_LOOP_IN_BUCKET_LINKS = 63; + + NOT_SUPPORTED_OPERATION = 64; + + PARTIAL_RENAME = 65; + } /** @@ -484,6 +493,8 @@ message BucketInfo { optional uint64 objectID = 9; optional uint64 updateID = 10; optional uint64 modificationTime = 11; + optional string sourceVolume = 12; + optional string sourceBucket = 13; } enum StorageTypeProto { @@ -834,6 +845,26 @@ message LookupKeyResponse { optional uint64 openVersion = 4; } +message RenameKeysRequest { + required RenameKeysArgs renameKeysArgs = 1; +} + +message RenameKeysArgs { + required string volumeName = 1; + required string bucketName = 2; + repeated RenameKeysMap renameKeysMap = 3; +} + +message RenameKeysMap { + required string fromKeyName = 1; + required string toKeyName = 2; +} + +message RenameKeysResponse{ + repeated RenameKeysMap unRenamedKeys = 1; + optional bool status = 2; +} + message RenameKeyRequest{ required KeyArgs keyArgs = 1; required string toKeyName = 2; @@ -848,7 +879,18 @@ message DeleteKeyRequest { } message DeleteKeysRequest { - repeated KeyArgs keyArgs = 1; + optional DeleteKeyArgs deleteKeys = 1; +} + +message DeleteKeyArgs { + required string volumeName = 1; + required string bucketName = 2; + repeated string keys = 3; +} + +message DeleteKeysResponse { + optional DeleteKeyArgs unDeletedKeys = 1; + optional bool status = 2; } message DeleteKeyResponse { @@ -866,10 +908,7 @@ message DeletedKeys { repeated string keys = 3; } -message DeleteKeysResponse { - repeated KeyInfo deletedKeys = 1; - repeated KeyInfo unDeletedKeys = 2; -} + message PurgeKeysRequest { repeated DeletedKeys deletedKeys = 1; diff --git a/hadoop-ozone/interface-client/src/main/proto/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock similarity index 99% rename from hadoop-ozone/interface-client/src/main/proto/proto.lock rename to hadoop-ozone/interface-client/src/main/resources/proto.lock index 2d90e1cd442a..bef3a0df1e8d 100644 --- a/hadoop-ozone/interface-client/src/main/proto/proto.lock +++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock @@ -417,8 +417,8 @@ "integer": 61 }, { - "name": "REPLAY", - "integer": 100 + "name": "PARTIAL_DELETE", + "integer": 62 } ] }, @@ -2434,12 +2434,47 @@ "fields": [ { "id": 1, - "name": "keyArgs", - "type": "KeyArgs", + "name": "deleteKeys", + "type": "DeleteKeyArgs" + } + ] + }, + { + "name": "DeleteKeyArgs", + "fields": [ + { + "id": 1, + "name": "volumeName", + "type": "string" + }, + { + "id": 2, + "name": "bucketName", + "type": "string" + }, + { + "id": 3, + "name": "keys", + "type": "string", "is_repeated": true } ] }, + { + "name": "DeleteKeysResponse", + "fields": [ + { + "id": 1, + "name": "unDeletedKeys", + "type": "DeleteKeyArgs" + }, + { + "id": 2, + "name": "status", + "type": "bool" + } + ] + }, { "name": "DeleteKeyResponse", "fields": [ @@ -2481,23 +2516,6 @@ } ] }, - { - "name": "DeleteKeysResponse", - "fields": [ - { - "id": 1, - "name": "deletedKeys", - "type": "KeyInfo", - "is_repeated": true - }, - { - "id": 2, - "name": 
"unDeletedKeys", - "type": "KeyInfo", - "is_repeated": true - } - ] - }, { "name": "PurgeKeysRequest", "fields": [ diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 056498a9a050..d01569ae0987 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-ozone-manager - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Manager Server Apache Hadoop Ozone Manager Server jar @@ -43,6 +43,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-docs + provided diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 2e0c6cfa56ea..4349d7c185ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.om; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -25,7 +24,7 @@ import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -34,6 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.util.StringUtils; @@ -41,6 +41,7 @@ import com.google.common.base.Preconditions; import org.iq80.leveldb.DBException; +import org.jetbrains.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -136,46 +137,34 @@ public void createBucket(OmBucketInfo bucketInfo) throws IOException { throw new OMException("Bucket already exist", OMException.ResultCodes.BUCKET_ALREADY_EXISTS); } + BucketEncryptionKeyInfo bek = bucketInfo.getEncryptionKeyInfo(); - BucketEncryptionKeyInfo.Builder bekb = null; - if (bek != null) { - if (kmsProvider == null) { - throw new OMException("Invalid KMS provider, check configuration " + - CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, - OMException.ResultCodes.INVALID_KMS_PROVIDER); - } - if (bek.getKeyName() == null) { - throw new OMException("Bucket encryption key needed.", OMException - .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // Talk to KMS to retrieve the bucket encryption key info. 
- KeyProvider.Metadata metadata = getKMSProvider().getMetadata( - bek.getKeyName()); - if (metadata == null) { - throw new OMException("Bucket encryption key " + bek.getKeyName() - + " doesn't exist.", - OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // If the provider supports pool for EDEKs, this will fill in the pool - kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); - bekb = new BucketEncryptionKeyInfo.Builder() - .setKeyName(bek.getKeyName()) - .setVersion(CryptoProtocolVersion.ENCRYPTION_ZONES) - .setSuite(CipherSuite.convert(metadata.getCipher())); - } - List acls = new ArrayList<>(); - acls.addAll(bucketInfo.getAcls()); - volumeArgs.getAclMap().getDefaultAclList().forEach( - a -> acls.add(OzoneAcl.fromProtobufWithAccessType(a))); - - OmBucketInfo.Builder omBucketInfoBuilder = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setAcls(acls) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(Time.now()) - .addAllMetadata(bucketInfo.getMetadata()); + + boolean hasSourceVolume = bucketInfo.getSourceVolume() != null; + boolean hasSourceBucket = bucketInfo.getSourceBucket() != null; + + if (hasSourceBucket != hasSourceVolume) { + throw new OMException("Both source volume and source bucket are " + + "required for bucket links", + OMException.ResultCodes.INVALID_REQUEST); + } + + if (bek != null && hasSourceBucket) { + throw new OMException("Encryption cannot be set for bucket links", + OMException.ResultCodes.INVALID_REQUEST); + } + + BucketEncryptionKeyInfo.Builder bekb = + createBucketEncryptionKeyInfoBuilder(bek); + + OmBucketInfo.Builder omBucketInfoBuilder = bucketInfo.toBuilder() + .setCreationTime(Time.now()); + + List defaultAclList = + volumeArgs.getAclMap().getDefaultAclList(); + for (OzoneManagerProtocolProtos.OzoneAclInfo a : defaultAclList) { + omBucketInfoBuilder.addAcl(OzoneAcl.fromProtobufWithAccessType(a)); + } if (bekb != null) { omBucketInfoBuilder.setBucketEncryptionKey(bekb.build()); @@ -183,7 +172,14 @@ public void createBucket(OmBucketInfo bucketInfo) throws IOException { OmBucketInfo omBucketInfo = omBucketInfoBuilder.build(); commitBucketInfoToDB(omBucketInfo); - LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); + if (hasSourceBucket) { + LOG.debug("created link {}/{} to bucket: {}/{}", + volumeName, bucketName, + omBucketInfo.getSourceVolume(), omBucketInfo.getSourceBucket()); + } else { + LOG.debug("created bucket: {} in volume: {}", bucketName, + volumeName); + } } catch (IOException | DBException ex) { if (!(ex instanceof OMException)) { LOG.error("Bucket creation failed for bucket:{} in volume:{}", @@ -199,6 +195,38 @@ public void createBucket(OmBucketInfo bucketInfo) throws IOException { } } + @Nullable + public BucketEncryptionKeyInfo.Builder createBucketEncryptionKeyInfoBuilder( + BucketEncryptionKeyInfo bek) throws IOException { + BucketEncryptionKeyInfo.Builder bekb = null; + if (bek != null) { + if (kmsProvider == null) { + throw new OMException("Invalid KMS provider, check configuration " + + CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, + OMException.ResultCodes.INVALID_KMS_PROVIDER); + } + if (bek.getKeyName() == null) { + throw new OMException("Bucket encryption key needed.", OMException + .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // Talk to KMS to retrieve the bucket encryption key info. 
+ KeyProvider.Metadata metadata = getKMSProvider().getMetadata( + bek.getKeyName()); + if (metadata == null) { + throw new OMException("Bucket encryption key " + bek.getKeyName() + + " doesn't exist.", + OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // If the provider supports pool for EDEKs, this will fill in the pool + kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); + bekb = new BucketEncryptionKeyInfo.Builder() + .setKeyName(bek.getKeyName()) + .setVersion(CryptoProtocolVersion.ENCRYPTION_ZONES) + .setSuite(CipherSuite.convert(metadata.getCipher())); + } + return bekb; + } + private void commitBucketInfoToDB(OmBucketInfo omBucketInfo) throws IOException { String dbBucketKey = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 2088f5da71c1..658f503a1a70 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -182,14 +182,15 @@ List listTrash(String volumeName, String bucketName, List getPendingDeletionKeys(int count) throws IOException; /** - * Returns a list of all still open key info. Which contains the info about - * the key name and all its associated block IDs. A pending open key has - * prefix #open# in OM DB. + * Returns the names of up to {@code count} open keys that are older than + * the configured expiration age. * - * @return a list of {@link BlockGroup} representing keys and blocks. + * @param count The maximum number of expired open keys to return. + * @return a list of {@link String} representing the names of expired + * open keys. * @throws IOException */ - List getExpiredOpenKeys() throws IOException; + List getExpiredOpenKeys(int count) throws IOException; /** * Deletes a expired open key by its name. Called when a hanging key has been @@ -266,4 +267,10 @@ OmMultipartUploadList listMultipartUploads(String volumeName, OmMultipartUploadListParts listParts(String volumeName, String bucketName, String keyName, String uploadID, int partNumberMarker, int maxParts) throws IOException; + + /** + * Refresh the key block location information by get latest info from SCM. 
+ * @param key + */ + void refresh(OmKeyInfo key) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 9e12e131899f..de395b43c5ba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -23,6 +23,7 @@ import java.security.PrivilegedExceptionAction; import java.time.Instant; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; @@ -54,6 +55,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.hdds.utils.db.BatchOperation; @@ -105,7 +107,6 @@ import com.google.common.base.Preconditions; import com.google.common.base.Strings; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; @@ -173,6 +174,15 @@ public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient, conf, omId, secretManager, null, null); } + @VisibleForTesting + public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient, + StorageContainerLocationProtocol scmContainerClient, + OMMetadataManager metadataManager, OzoneConfiguration conf, String omId, + OzoneBlockTokenSecretManager secretManager) { + this(null, new ScmClient(scmBlockClient, scmContainerClient), + metadataManager, conf, omId, secretManager, null, null); + } + public KeyManagerImpl(OzoneManager om, ScmClient scmClient, OzoneConfiguration conf, String omId) { this (om, scmClient, om.getMetadataManager(), conf, omId, @@ -638,48 +648,52 @@ public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) String keyName = args.getKeyName(); metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName); + OmKeyInfo value = null; try { String keyBytes = metadataManager.getOzoneKey( volumeName, bucketName, keyName); - OmKeyInfo value = metadataManager.getKeyTable().get(keyBytes); - if (value == null) { - LOG.debug("volume:{} bucket:{} Key:{} not found", - volumeName, bucketName, keyName); - throw new OMException("Key not found", - KEY_NOT_FOUND); - } - if (grpcBlockTokenEnabled) { - String remoteUser = getRemoteUser().getShortUserName(); - for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) { - key.getLocationList().forEach(k -> { - k.setToken(secretManager.generateToken(remoteUser, - k.getBlockID().getContainerBlockID().toString(), - getAclForUser(remoteUser), - k.getLength())); - }); - } - } - // Refresh container pipeline info from SCM - // based on OmKeyArgs.refreshPipeline flag - if (args.getRefreshPipeline()) { - refreshPipeline(value); - } - if (args.getSortDatanodes()) { - sortDatanodeInPipeline(value, clientAddress); - } - return value; + value = metadataManager.getKeyTable().get(keyBytes); } catch (IOException ex) { if (ex 
instanceof OMException) { throw ex; } - LOG.debug("Get key failed for volume:{} bucket:{} key:{}", - volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), - KEY_NOT_FOUND); + if (LOG.isDebugEnabled()) { + LOG.debug("Get key failed for volume:{} bucket:{} key:{}", volumeName, + bucketName, keyName, ex); + } + throw new OMException(ex.getMessage(), KEY_NOT_FOUND); } finally { metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } + + if (value == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName, + bucketName, keyName); + } + throw new OMException("Key not found", KEY_NOT_FOUND); + } + if (grpcBlockTokenEnabled) { + String remoteUser = getRemoteUser().getShortUserName(); + for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) { + key.getLocationList().forEach(k -> { + k.setToken(secretManager.generateToken(remoteUser, + k.getBlockID().getContainerBlockID().toString(), + getAclForUser(remoteUser), k.getLength())); + }); + } + } + + // Refresh container pipeline info from SCM + // based on OmKeyArgs.refreshPipeline flag + // value won't be null as the check is done inside try/catch block. + refreshPipeline(value); + + if (args.getSortDatanodes()) { + sortDatanodeInPipeline(value, clientAddress); + } + return value; } /** @@ -688,22 +702,63 @@ public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) */ @VisibleForTesting protected void refreshPipeline(OmKeyInfo value) throws IOException { - final List locationInfoGroups = value == null ? - null : value.getKeyLocationVersions(); + Preconditions.checkNotNull(value, "OMKeyInfo cannot be null"); + refreshPipeline(Arrays.asList(value)); + } - // TODO: fix Some tests that may not initialize container client - // The production should always have containerClient initialized. - if (scmClient.getContainerClient() == null || - CollectionUtils.isEmpty(locationInfoGroups)) { + /** + * Refresh pipeline info in OM by asking SCM. + * @param keyList a list of OmKeyInfo + */ + @VisibleForTesting + protected void refreshPipeline(List keyList) throws IOException { + if (keyList == null || keyList.isEmpty()) { return; } Set containerIDs = new HashSet<>(); - for (OmKeyLocationInfoGroup key : locationInfoGroups) { - for (OmKeyLocationInfo k : key.getLocationList()) { - containerIDs.add(k.getContainerID()); + for (OmKeyInfo keyInfo : keyList) { + List locationInfoGroups = + keyInfo.getKeyLocationVersions(); + + for (OmKeyLocationInfoGroup key : locationInfoGroups) { + for (OmKeyLocationInfo k : key.getLocationList()) { + containerIDs.add(k.getContainerID()); + } + } + } + + Map containerWithPipelineMap = + refreshPipeline(containerIDs); + + for (OmKeyInfo keyInfo : keyList) { + List locationInfoGroups = + keyInfo.getKeyLocationVersions(); + for (OmKeyLocationInfoGroup key : locationInfoGroups) { + for (OmKeyLocationInfo k : key.getLocationList()) { + ContainerWithPipeline cp = + containerWithPipelineMap.get(k.getContainerID()); + if (cp != null && !cp.getPipeline().equals(k.getPipeline())) { + k.setPipeline(cp.getPipeline()); + } + } } } + } + + /** + * Refresh pipeline info in OM by asking SCM. + * @param containerIDs a set of containerIDs + */ + @VisibleForTesting + protected Map refreshPipeline( + Set containerIDs) throws IOException { + // TODO: fix Some tests that may not initialize container client + // The production should always have containerClient initialized. 
+ if (scmClient.getContainerClient() == null || + containerIDs == null || containerIDs.isEmpty()) { + return Collections.EMPTY_MAP; + } Map containerWithPipelineMap = new HashMap<>(); @@ -714,22 +769,12 @@ protected void refreshPipeline(OmKeyInfo value) throws IOException { containerWithPipelineMap.put( cp.getContainerInfo().getContainerID(), cp); } + return containerWithPipelineMap; } catch (IOException ioEx) { - LOG.debug("Get containerPipeline failed for volume:{} bucket:{} " + - "key:{}", value.getVolumeName(), value.getBucketName(), - value.getKeyName(), ioEx); + LOG.debug("Get containerPipeline failed for {}", + containerIDs.toString(), ioEx); throw new OMException(ioEx.getMessage(), SCM_GET_PIPELINE_EXCEPTION); } - - for (OmKeyLocationInfoGroup key : locationInfoGroups) { - for (OmKeyLocationInfo k : key.getLocationList()) { - ContainerWithPipeline cp = - containerWithPipelineMap.get(k.getContainerID()); - if (!cp.getPipeline().equals(k.getPipeline())) { - k.setPipeline(cp.getPipeline()); - } - } - } } @Override @@ -849,7 +894,7 @@ public void deleteKey(OmKeyArgs args) throws IOException { private boolean isKeyEmpty(OmKeyInfo keyInfo) { for (OmKeyLocationInfoGroup keyLocationList : keyInfo .getKeyLocationVersions()) { - if (keyLocationList.getLocationList().size() != 0) { + if (keyLocationList.getLocationListCount() != 0) { return false; } } @@ -867,8 +912,10 @@ public List listKeys(String volumeName, String bucketName, // underlying table using an iterator. That automatically creates a // snapshot of the data, so we don't need these locks at a higher level // when we iterate. - return metadataManager.listKeys(volumeName, bucketName, + List keyList = metadataManager.listKeys(volumeName, bucketName, startKey, keyPrefix, maxKeys); + refreshPipeline(keyList); + return keyList; } @Override @@ -893,9 +940,8 @@ public List getPendingDeletionKeys(final int count) } @Override - public List getExpiredOpenKeys() throws IOException { - return metadataManager.getExpiredOpenKeys(); - + public List getExpiredOpenKeys(int count) throws IOException { + return metadataManager.getExpiredOpenKeys(count); } @Override @@ -960,7 +1006,7 @@ private OmMultipartInfo createMultipartInfo(OmKeyArgs keyArgs, .setReplicationFactor(keyArgs.getFactor()) .setPartKeyInfoList(partKeyInfoMap) .build(); - List locations = new ArrayList<>(); + Map> locations = new HashMap<>(); OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) @@ -1611,7 +1657,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) keyInfo = metadataManager.getOpenKeyTable().get(objectKey); } else { try { - OzoneFileStatus fileStatus = getFileStatus(args); + OzoneFileStatus fileStatus = getFileStatus(args, null); keyInfo = fileStatus.getKeyInfo(); } catch (IOException e) { // OzoneFS will check whether the key exists when write a new key. 
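One note on the getExpiredOpenKeys(int count) change earlier in this file: bounding the result lets a cleanup pass cap how much work it does per round. A minimal sketch of such a pass (the deletion call is a placeholder, not the patch's actual cleanup service):

    // Fetch at most batchSize expired open keys and clean them up one by one.
    int batchSize = 1000;
    List<String> expiredOpenKeys = keyManager.getExpiredOpenKeys(batchSize);
    for (String openKeyName : expiredOpenKeys) {
      deleteExpiredOpenKey(openKeyName); // placeholder for the real cleanup call
    }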
@@ -1689,12 +1735,25 @@ private void validateOzoneObj(OzoneObj obj) throws OMException { * @throws IOException if there is error in the db * invalid arguments */ - public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { + public OzoneFileStatus getFileStatus(OmKeyArgs args, String clientAddress) + throws IOException { Preconditions.checkNotNull(args, "Key args can not be null"); String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); String keyName = args.getKeyName(); + return getOzoneFileStatus(volumeName, bucketName, keyName, + args.getRefreshPipeline(), false, null); + } + + private OzoneFileStatus getOzoneFileStatus(String volumeName, + String bucketName, + String keyName, + boolean refreshPipeline, + boolean sortDatanodes, + String clientAddress) + throws IOException { + OmKeyInfo fileKeyInfo = null; metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName); try { @@ -1706,36 +1765,43 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { // Check if the key is a file. String fileKeyBytes = metadataManager.getOzoneKey( - volumeName, bucketName, keyName); - OmKeyInfo fileKeyInfo = metadataManager.getKeyTable().get(fileKeyBytes); + volumeName, bucketName, keyName); + fileKeyInfo = metadataManager.getKeyTable().get(fileKeyBytes); + + // Check if the key is a directory. + if (fileKeyInfo == null) { + String dirKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + String dirKeyBytes = metadataManager.getOzoneKey( + volumeName, bucketName, dirKey); + OmKeyInfo dirKeyInfo = metadataManager.getKeyTable().get(dirKeyBytes); + if (dirKeyInfo != null) { + return new OzoneFileStatus(dirKeyInfo, scmBlockSize, true); + } + } + } finally { + metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, + bucketName); + + // if the key is a file then do refresh pipeline info in OM by asking SCM if (fileKeyInfo != null) { - if (args.getRefreshPipeline()) { - refreshPipeline(fileKeyInfo); + refreshPipeline(fileKeyInfo); + if (sortDatanodes) { + sortDatanodeInPipeline(fileKeyInfo, clientAddress); } // this is a file return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false); } + } - String dirKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - String dirKeyBytes = metadataManager.getOzoneKey( - volumeName, bucketName, dirKey); - OmKeyInfo dirKeyInfo = metadataManager.getKeyTable().get(dirKeyBytes); - if (dirKeyInfo != null) { - return new OzoneFileStatus(dirKeyInfo, scmBlockSize, true); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Unable to get file status for the key: volume: {}, bucket:" + - " {}, key: {}, with error: No such file exists.", volumeName, - bucketName, keyName); - } - throw new OMException("Unable to get file status: volume: " + - volumeName + " bucket: " + bucketName + " key: " + keyName, - FILE_NOT_FOUND); - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); + // Key is not found, throws exception + if (LOG.isDebugEnabled()) { + LOG.debug("Unable to get file status for the key: volume: {}, bucket:" + + " {}, key: {}, with error: No such file exists.", + volumeName, bucketName, keyName); } + throw new OMException("Unable to get file status: volume: " + + volumeName + " bucket: " + bucketName + " key: " + keyName, + FILE_NOT_FOUND); } /** @@ -1832,7 +1898,7 @@ public OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, try { OzoneFileStatus fileStatus; try { - fileStatus = getFileStatus(args); + fileStatus = getFileStatus(args, 
null); if (fileStatus.isDirectory()) { throw new OMException("Can not write to directory: " + keyName, ResultCodes.NOT_A_FILE); @@ -1877,30 +1943,26 @@ public OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); String keyName = args.getKeyName(); - - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - OzoneFileStatus fileStatus = getFileStatus(args); - if (fileStatus.isFile()) { - if (args.getRefreshPipeline()) { - refreshPipeline(fileStatus.getKeyInfo()); - } - if (args.getSortDatanodes()) { - sortDatanodeInPipeline(fileStatus.getKeyInfo(), clientAddress); - } - return fileStatus.getKeyInfo(); - } - //if key is not of type file or if key is not found we throw an exception - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); + OzoneFileStatus fileStatus = getOzoneFileStatus(volumeName, bucketName, + keyName, args.getRefreshPipeline(), args.getSortDatanodes(), + clientAddress); + //if key is not of type file or if key is not found we throw an exception + if (fileStatus.isFile()) { + return fileStatus.getKeyInfo(); } - throw new OMException("Can not write to directory: " + keyName, ResultCodes.NOT_A_FILE); } + /** + * Refresh the key block location information by get latest info from SCM. + * @param key + */ + public void refresh(OmKeyInfo key) throws IOException { + Preconditions.checkNotNull(key, "Key info can not be null"); + refreshPipeline(Arrays.asList(key)); + } + /** * Helper function for listStatus to find key in TableCache. */ @@ -1951,7 +2013,8 @@ private void listStatusFindKeyInTableCache( * @return list of file status */ public List listStatus(OmKeyArgs args, boolean recursive, - String startKey, long numEntries) throws IOException { + String startKey, long numEntries, String clientAddress) + throws IOException { Preconditions.checkNotNull(args, "Key args can not be null"); List fileStatusList = new ArrayList<>(); @@ -1971,7 +2034,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, bucketName); try { if (Strings.isNullOrEmpty(startKey)) { - OzoneFileStatus fileStatus = getFileStatus(args); + OzoneFileStatus fileStatus = getFileStatus(args, null); if (fileStatus.isFile()) { return Collections.singletonList(fileStatus); } @@ -2067,10 +2130,8 @@ public List listStatus(OmKeyArgs args, boolean recursive, for (Map.Entry entry : cacheKeyMap.entrySet()) { // No need to check if a key is deleted or not here, this is handled // when adding entries to cacheKeyMap from DB. 
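/*
 * Hypothetical use of the new refresh(OmKeyInfo) helper above, assuming it is
 * exposed on the KeyManager interface used by callers: when the block
 * locations held by a reader go stale (for example after the pipeline serving
 * a container is closed), the key's location list can be re-resolved from SCM
 * in place and the read retried.
 */
static void reResolveLocations(KeyManager keyManager, OmKeyInfo staleKey)
    throws IOException {
  // After this call the OmKeyLocationInfo entries on staleKey carry the
  // pipelines SCM returned, and the caller can retry its read against them.
  keyManager.refresh(staleKey);
}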
- if (args.getRefreshPipeline()) { - refreshPipeline(entry.getValue().getKeyInfo()); - } - fileStatusList.add(entry.getValue()); + OzoneFileStatus fileStatus = entry.getValue(); + fileStatusList.add(fileStatus); countEntries++; if (countEntries >= numEntries) { break; @@ -2083,6 +2144,11 @@ public List listStatus(OmKeyArgs args, boolean recursive, metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } + if (args.getRefreshPipeline()) { + for(OzoneFileStatus fileStatus : fileStatusList){ + refreshPipeline(fileStatus.getKeyInfo()); + } + } return fileStatusList; } @@ -2127,7 +2193,7 @@ private OzoneFileStatus verifyNoFilesInPath(String volumeName, String keyName = path.toString(); try { OzoneFileStatus fileStatus = - getFileStatus(argsBuilder.setKeyName(keyName).build()); + getFileStatus(argsBuilder.setKeyName(keyName).build(), null); if (fileStatus.isFile()) { LOG.error("Unable to create directory (File already exists): " + "volume: {} bucket: {} key: {}", volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 439f5465087c..c687a4b22907 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -244,14 +244,14 @@ List listVolumes(String userName, String prefix, List getPendingDeletionKeys(int count) throws IOException; /** - * Returns a list of all still open key info. Which contains the info about - * the key name and all its associated block IDs. A pending open key has - * prefix #open# in OM DB. + * Returns the names of up to {@code count} open keys that are older than + * the configured expiration age. * - * @return a list of {@link BlockGroup} representing keys and blocks. + * @param count The maximum number of open keys to return. + * @return a list of {@link String} representing names of open expired keys. * @throws IOException */ - List getExpiredOpenKeys() throws IOException; + List getExpiredOpenKeys(int count) throws IOException; /** * Returns the user Table. 
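/*
 * Sketch of the locking pattern the listStatus change above adopts: gather
 * results under the bucket read lock, release the lock, and only then make
 * the potentially slow SCM calls that refresh pipelines. The lock object and
 * collectStatusesLocally() below are illustrative stand-ins, not the actual
 * OM lock API.
 */
ReadWriteLock bucketLock = new ReentrantReadWriteLock();
List<OzoneFileStatus> statuses;
bucketLock.readLock().lock();
try {
  statuses = collectStatusesLocally();    // DB + cache only, no remote calls
} finally {
  bucketLock.readLock().unlock();
}
for (OzoneFileStatus status : statuses) {
  refreshPipeline(status.getKeyInfo());   // remote SCM call, outside the lock
}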
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index f2292779f5ee..cd6566292412 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -214,6 +214,10 @@ public void setNumKeys(long val) { this.numKeys.incr(val- oldVal); } + public void decNumKeys(long val) { + this.numKeys.incr(-val); + } + public long getNumVolumes() { return numVolumes.value(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index d48c6fa9a36f..aff8a14e2710 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -19,6 +19,9 @@ import java.io.File; import java.io.IOException; import java.nio.file.Paths; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -77,9 +80,11 @@ import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import org.apache.ratis.util.ExitUtils; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,7 +119,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager { * |----------------------------------------------------------------------| * | s3SecretTable | s3g_access_key_id -> s3Secret | * |----------------------------------------------------------------------| - * | dTokenTable | s3g_access_key_id -> s3Secret | + * | dTokenTable | OzoneTokenID -> renew_time | * |----------------------------------------------------------------------| * | prefixInfoTable | prefix -> PrefixInfo | * |----------------------------------------------------------------------| @@ -155,6 +160,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager { private Table prefixTable; private Table transactionInfoTable; private boolean isRatisEnabled; + private boolean ignorePipelineinKey; private Map tableMap = new HashMap<>(); @@ -170,6 +176,9 @@ public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException { isRatisEnabled = conf.getBoolean( OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + // For test purpose only + ignorePipelineinKey = conf.getBoolean( + "ozone.om.ignore.pipeline", Boolean.TRUE); start(conf); } @@ -249,6 +258,20 @@ public void start(OzoneConfiguration configuration) throws IOException { if (store == null) { File metaDir = OMStorage.getOmDbDir(configuration); + // Check if there is a DB Inconsistent Marker in the metaDir. This + // marker indicates that the DB is in an inconsistent state and hence + // the OM process should be terminated. 
+ File markerFile = new File(metaDir, DB_TRANSIENT_MARKER); + if (markerFile.exists()) { + LOG.error("File {} marks that OM DB is in an inconsistent state."); + // Note - The marker file should be deleted only after fixing the DB. + // In an HA setup, this can be done by replacing this DB with a + // checkpoint from another OM. + String errorMsg = "Cannot load OM DB as it is in an inconsistent " + + "state."; + ExitUtils.terminate(1, errorMsg, LOG); + } + RocksDBConfiguration rocksDBConfiguration = configuration.getObject(RocksDBConfiguration.class); @@ -273,10 +296,15 @@ public void start(OzoneConfiguration configuration) throws IOException { public static DBStore loadDB(OzoneConfiguration configuration, File metaDir) throws IOException { + return loadDB(configuration, metaDir, OM_DB_NAME); + } + + public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, + String dbName) throws IOException { RocksDBConfiguration rocksDBConfiguration = configuration.getObject(RocksDBConfiguration.class); DBStoreBuilder dbStoreBuilder = DBStoreBuilder.newBuilder(configuration, - rocksDBConfiguration).setName(OM_DB_NAME) + rocksDBConfiguration).setName(dbName) .setPath(Paths.get(metaDir.getPath())); DBStore dbStore = addOMTablesAndCodecs(dbStoreBuilder).build(); return dbStore; @@ -296,8 +324,9 @@ protected static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) { .addTable(PREFIX_TABLE) .addTable(TRANSACTION_INFO_TABLE) .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec()) - .addCodec(OmKeyInfo.class, new OmKeyInfoCodec()) - .addCodec(RepeatedOmKeyInfo.class, new RepeatedOmKeyInfoCodec()) + .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true)) + .addCodec(RepeatedOmKeyInfo.class, + new RepeatedOmKeyInfoCodec(true)) .addCodec(OmBucketInfo.class, new OmBucketInfoCodec()) .addCodec(OmVolumeArgs.class, new OmVolumeArgsCodec()) .addCodec(UserVolumeInfo.class, new UserVolumeInfoCodec()) @@ -966,10 +995,34 @@ public List getPendingDeletionKeys(final int keyCount) } @Override - public List getExpiredOpenKeys() throws IOException { - List keyBlocksList = Lists.newArrayList(); - // TODO: Fix the getExpiredOpenKeys, Not part of this patch. - return keyBlocksList; + public List getExpiredOpenKeys(int count) throws IOException { + // Only check for expired keys in the open key table, not its cache. + // If a key expires while it is in the cache, it will be cleaned + // up after the cache is flushed. 
+ final Duration expirationDuration = + Duration.of(openKeyExpireThresholdMS, ChronoUnit.MILLIS); + List expiredKeys = Lists.newArrayList(); + + try (TableIterator> + keyValueTableIterator = getOpenKeyTable().iterator()) { + + while (keyValueTableIterator.hasNext() && expiredKeys.size() < count) { + KeyValue openKeyValue = keyValueTableIterator.next(); + String openKey = openKeyValue.getKey(); + OmKeyInfo openKeyInfo = openKeyValue.getValue(); + + Duration openKeyAge = + Duration.between( + Instant.ofEpochMilli(openKeyInfo.getCreationTime()), + Instant.now()); + + if (openKeyAge.compareTo(expirationDuration) >= 0) { + expiredKeys.add(openKey); + } + } + } + + return expiredKeys; } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java index 79bc39f49846..6a5045a8f630 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java @@ -18,10 +18,7 @@ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; @@ -30,7 +27,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.List; import java.util.concurrent.TimeUnit; /** @@ -76,39 +72,13 @@ public int getPriority() { @Override public BackgroundTaskResult call() throws Exception { + // This method is currently never used. It will be implemented in + // HDDS-4122, and integrated into the rest of the code base in HDDS-4123. try { - List keyBlocksList = keyManager.getExpiredOpenKeys(); - if (keyBlocksList.size() > 0) { - int toDeleteSize = keyBlocksList.size(); - LOG.debug("Found {} to-delete open keys in OM", toDeleteSize); - List results = - scmClient.deleteKeyBlocks(keyBlocksList); - int deletedSize = 0; - for (DeleteBlockGroupResult result : results) { - if (result.isSuccess()) { - try { - keyManager.deleteExpiredOpenKey(result.getObjectKey()); - if (LOG.isDebugEnabled()) { - LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); - } - deletedSize += 1; - } catch (IOException e) { - LOG.warn("Failed to delete hanging-open key {}", - result.getObjectKey(), e); - } - } else { - LOG.warn("Deleting open Key {} failed because some of the blocks" - + " were failed to delete, failed blocks: {}", - result.getObjectKey(), - StringUtils.join(",", result.getFailedBlocks())); - } - } - LOG.info("Found {} expired open key entries, successfully " + - "cleaned up {} entries", toDeleteSize, deletedSize); - return results::size; - } else { - LOG.debug("No hanging open key found in OM"); - } + // The new API for deleting expired open keys in OM HA will differ + // significantly from the old implementation. + // The old implementation has been removed so the code compiles. 
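/*
 * One possible shape for the future bounded cleanup pass, hedged here because
 * the actual design is deferred to the follow-up work referenced in the
 * comments above: drain expired open keys in fixed-size batches so a single
 * run never scans an unbounded amount of state. deleteOpenKeys() is a
 * placeholder for the yet-to-be-added deletion request, not a real API.
 */
static void cleanupExpiredOpenKeys(KeyManager keyManager, int batchSize)
    throws IOException {
  List<String> expired;
  do {
    expired = keyManager.getExpiredOpenKeys(batchSize);
    if (!expired.isEmpty()) {
      deleteOpenKeys(expired);   // placeholder for the future delete request
    }
  } while (expired.size() == batchSize);
}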
+ keyManager.getExpiredOpenKeys(0); } catch (IOException e) { LOG.error("Unable to get hanging open keys, retry in" + " next interval", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 1b75b95a1598..e43524aee2a7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -36,15 +36,18 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.Timer; import java.util.TimerTask; -import java.util.concurrent.TimeUnit; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import com.google.common.base.Optional; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -58,6 +61,7 @@ import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; @@ -76,9 +80,12 @@ import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.hdds.utils.RetriableTask; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper; import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.Client; @@ -107,6 +114,7 @@ import org.apache.hadoop.ozone.om.helpers.DBUpdates; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -116,6 +124,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -129,12 +138,15 @@ import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import 
org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo; import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB; import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager; @@ -150,6 +162,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.security.acl.RequestContext; +import org.apache.hadoop.ozone.util.ExitManager; import org.apache.hadoop.ozone.util.OzoneVersionInfo; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -161,7 +174,6 @@ import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; - import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; @@ -170,6 +182,8 @@ import com.google.protobuf.BlockingService; import com.google.protobuf.ProtocolMessageEnum; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; + import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; @@ -178,6 +192,7 @@ import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName; import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; @@ -188,10 +203,13 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER; import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_FILE; import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE; import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT; 
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; @@ -202,13 +220,17 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_VOLUME_LISTALL_ALLOWED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_VOLUME_LISTALL_ALLOWED_DEFAULT; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneManagerService.newReflectiveBlockingService; + +import org.apache.hadoop.util.Time; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; import org.apache.ratis.server.protocol.TermIndex; +import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.FileUtils; import org.apache.ratis.util.LifeCycle; import org.bouncycastle.pkcs.PKCS10CertificationRequest; @@ -228,6 +250,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl AuditLoggerType.OMLOGGER); private static final String OM_DAEMON = "om"; + private static boolean securityEnabled = false; private OzoneDelegationTokenSecretManager delegationTokenMgr; private OzoneBlockTokenSecretManager blockTokenMgr; @@ -299,6 +322,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private boolean isNativeAuthorizerEnabled; + private ExitManager exitManager; + private enum State { INITIALIZED, RUNNING, @@ -342,9 +367,11 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, OZONE_OM_USER_MAX_VOLUME + " value should be greater than zero"); if (omStorage.getState() != StorageState.INITIALIZED) { - throw new OMException("OM not initialized, current OM storage state: " + - omStorage.getState().name() + ". Please ensure 'ozone om --init' " - + "command is executed once before starting the OM service.", + throw new OMException("OM not initialized, current OM storage state: " + + omStorage.getState().name() + ". Please ensure 'ozone om --init' " + + "command is executed to generate all the required metadata to " + + omStorage.getStorageDir() + + " once before starting the OM service.", ResultCodes.OM_NOT_INITIALIZED); } omMetaDir = OMStorage.getOmDbDir(configuration); @@ -380,6 +407,7 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, ScmInfo scmInfo = getScmInfo(configuration); if (!(scmInfo.getClusterId().equals(omStorage.getClusterID()) && scmInfo .getScmId().equals(omStorage.getScmId()))) { + logVersionMismatch(conf, scmInfo); throw new OMException("SCM version info mismatch.", ResultCodes.SCM_VERSION_MISMATCH_ERROR); } @@ -410,6 +438,10 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, } instantiateServices(); + + // Create special volume s3v which is required for S3G. 
+ addS3GVolumeToDB(); + this.omRatisSnapshotInfo = new OMRatisSnapshotInfo(); initializeRatisServer(); if (isRatisEnabled) { @@ -449,6 +481,21 @@ private OzoneManager(OzoneConfiguration conf) throws IOException, omState = State.INITIALIZED; } + private void logVersionMismatch(OzoneConfiguration conf, ScmInfo scmInfo) { + InetSocketAddress scmBlockAddress = + getScmAddressForBlockClients(conf); + if (!scmInfo.getClusterId().equals(omStorage.getClusterID())) { + LOG.error("clusterId from {} is {}, but is {} in {}", + scmBlockAddress, scmInfo.getClusterId(), + omStorage.getClusterID(), omStorage.getVersionFile()); + } + if (!scmInfo.getScmId().equals(omStorage.getScmId())) { + LOG.error("scmId from {} is {}, but is {} in {}", + scmBlockAddress, scmInfo.getScmId(), + omStorage.getScmId(), omStorage.getVersionFile()); + } + } + /** * Instantiate services which are dependent on the OM DB state. * When OM state is reloaded, these services are re-initialized with the @@ -1116,8 +1163,7 @@ public void start() throws IOException { metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period); keyManager.start(configuration); - omRpcServer.start(); - isOmRpcServerRunning = true; + try { httpServer = new OzoneManagerHttpServer(configuration, this); httpServer.start(); @@ -1125,11 +1171,15 @@ public void start() throws IOException { // Allow OM to start as Http Server failure is not fatal. LOG.error("OM HttpServer failed to start.", ex); } + omRpcServer.start(); + isOmRpcServerRunning = true; + registerMXBean(); startJVMPauseMonitor(); setStartTime(); omState = State.RUNNING; + } /** @@ -1169,8 +1219,6 @@ public void restart() throws IOException { } omRpcServer = getRpcServer(configuration); - omRpcServer.start(); - isOmRpcServerRunning = true; try { httpServer = new OzoneManagerHttpServer(configuration, this); @@ -1179,6 +1227,10 @@ public void restart() throws IOException { // Allow OM to start as Http Server failure is not fatal. LOG.error("OM HttpServer failed to start.", ex); } + + omRpcServer.start(); + isOmRpcServerRunning = true; + registerMXBean(); startJVMPauseMonitor(); @@ -1333,8 +1385,7 @@ private static void getSCMSignedCert(CertificateClient client, .setConfiguration(config) .setScmID(omStore.getScmId()) .setClusterID(omStore.getClusterID()) - .setSubject(subject) - .addIpAddress(ip); + .setSubject(subject); OMHANodeDetails haOMHANodeDetails = OMHANodeDetails.loadOMHAConfig(config); String serviceName = @@ -2022,22 +2073,29 @@ public OmBucketInfo getBucketInfo(String volume, String bucket) */ @Override public OpenKeySession openKey(OmKeyArgs args) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { try { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } catch (OMException ex) { // For new keys key checkAccess call will fail as key doesn't exist. // Check user access for bucket. 
if (ex.getResult().equals(KEY_NOT_FOUND)) { checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } else { throw ex; } } } + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumKeyAllocates(); return keyManager.openKey(args); @@ -2045,12 +2103,12 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { metrics.incNumKeyAllocateFails(); auditSuccess = false; AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.ALLOCATE_KEY, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.ALLOCATE_KEY, (args == null) ? null : args.toAuditMap())); + OMAction.ALLOCATE_KEY, auditMap)); } } } @@ -2058,24 +2116,29 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { @Override public void commitKey(OmKeyArgs args, long clientID) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { try { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } catch (OMException ex) { // For new keys key checkAccess call will fail as key doesn't exist. // Check user access for bucket. if (ex.getResult().equals(KEY_NOT_FOUND)) { checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } else { throw ex; } } } - Map auditMap = (args == null) ? new LinkedHashMap<>() : - args.toAuditMap(); + + Map auditMap = bucket.audit(args.toAuditMap()); auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); + + args = bucket.update(args); + try { metrics.incNumKeyCommits(); keyManager.commitKey(args, clientID); @@ -2085,7 +2148,7 @@ public void commitKey(OmKeyArgs args, long clientID) // As key also can have multiple versions, we need to increment keys // only if version is 0. Currently we have not complete support of // versioning of keys. So, this can be revisited later. - if (args != null && args.getLocationInfoList() != null && + if (args.getLocationInfoList() != null && args.getLocationInfoList().size() > 0 && args.getLocationInfoList().get(0) != null && args.getLocationInfoList().get(0).getCreateVersion() == 0) { @@ -2102,25 +2165,30 @@ public void commitKey(OmKeyArgs args, long clientID) @Override public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, ExcludeList excludeList) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { try { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } catch (OMException ex) { // For new keys key checkAccess call will fail as key doesn't exist. // Check user access for bucket. if (ex.getResult().equals(KEY_NOT_FOUND)) { checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } else { throw ex; } } } + boolean auditSuccess = true; - Map auditMap = (args == null) ? 
new LinkedHashMap<>() : - args.toAuditMap(); + Map auditMap = bucket.audit(args.toAuditMap()); auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); + + args = bucket.update(args); + try { metrics.incNumBlockAllocateCalls(); return keyManager.allocateBlock(args, clientID, excludeList); @@ -2147,11 +2215,18 @@ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, */ @Override public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumKeyLookups(); return keyManager.lookupKey(args, getClientAddress()); @@ -2159,25 +2234,40 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { metrics.incNumKeyLookupFails(); auditSuccess = false; AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_KEY, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_KEY, - (args == null) ? null : args.toAuditMap())); + auditMap)); } } } + + @Override + public void renameKeys(OmRenameKeys omRenameKeys) + throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented. As write requests use a new approach"); + } + @Override public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { + Preconditions.checkNotNull(args); + + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } - Map auditMap = (args == null) ? new LinkedHashMap<>() : - args.toAuditMap(); + + Map auditMap = bucket.audit(args.toAuditMap()); auditMap.put(OzoneConsts.TO_KEY_NAME, toKeyName); + + args = bucket.update(args); + try { metrics.incNumKeyRenames(); keyManager.renameKey(args, toKeyName); @@ -2199,20 +2289,25 @@ public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { */ @Override public void deleteKey(OmKeyArgs args) throws IOException { + Map auditMap = args.toAuditMap(); try { + ResolvedBucket bucket = resolveBucketLink(args); + args = bucket.update(args); + if (isAclEnabled) { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.DELETE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } + metrics.incNumKeyDeletes(); keyManager.deleteKey(args); AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_KEY, - (args == null) ? null : args.toAuditMap())); + auditMap)); metrics.decNumKeys(); } catch (Exception ex) { metrics.incNumKeyDeleteFails(); AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_KEY, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } } @@ -2220,34 +2315,35 @@ public void deleteKey(OmKeyArgs args) throws IOException { /** * Deletes an existing key. * - * @param args - List attributes of the key. + * @param deleteKeys - List of keys to be deleted from volume and a bucket. 
* @throws IOException */ @Override - public void deleteKeys(List args) throws IOException { - if (args != null) { - for (OmKeyArgs keyArgs : args) { - deleteKey(keyArgs); - } - } + public void deleteKeys(OmDeleteKeys deleteKeys) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented. As write requests use a new approach"); } @Override public List listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException { + + ResolvedBucket bucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, - StoreType.OZONE, ACLType.LIST, volumeName, bucketName, keyPrefix); + checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST, + bucket.realVolume(), bucket.realBucket(), keyPrefix); } + boolean auditSuccess = true; - Map auditMap = buildAuditMap(volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); + Map auditMap = bucket.audit(); auditMap.put(OzoneConsts.START_KEY, startKey); auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys)); auditMap.put(OzoneConsts.KEY_PREFIX, keyPrefix); + try { metrics.incNumKeyLists(); - return keyManager.listKeys(volumeName, bucketName, + return keyManager.listKeys(bucket.realVolume(), bucket.realBucket(), startKey, keyPrefix, maxKeys); } catch (IOException ex) { metrics.incNumKeyListFails(); @@ -2268,6 +2364,8 @@ public List listTrash(String volumeName, String bucketName, String startKeyName, String keyPrefix, int maxKeys) throws IOException { + // bucket links not supported + if (isAclEnabled) { checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST, volumeName, bucketName, keyPrefix); @@ -2433,13 +2531,15 @@ public List getServiceList() throws IOException { .setType(ServicePort.Type.RPC) .setValue(omRpcAddress.getPort()) .build()); - if (httpServer.getHttpAddress() != null) { + if (httpServer != null + && httpServer.getHttpAddress() != null) { omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.HTTP) .setValue(httpServer.getHttpAddress().getPort()) .build()); } - if (httpServer.getHttpsAddress() != null) { + if (httpServer != null + && httpServer.getHttpsAddress() != null) { omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.HTTPS) .setValue(httpServer.getHttpsAddress().getPort()) @@ -2528,66 +2628,75 @@ public S3SecretValue getS3Secret(String kerberosID) throws IOException { @Override public OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws IOException { - OmMultipartInfo multipartInfo; + + Preconditions.checkNotNull(keyArgs); + ResolvedBucket bucket = resolveBucketLink(keyArgs); + + Map auditMap = bucket.audit(keyArgs.toAuditMap()); + + keyArgs = bucket.update(keyArgs); + metrics.incNumInitiateMultipartUploads(); try { - multipartInfo = keyManager.initiateMultipartUpload(keyArgs); + OmMultipartInfo result = keyManager.initiateMultipartUpload(keyArgs); AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null : - keyArgs.toAuditMap())); + OMAction.INITIATE_MULTIPART_UPLOAD, auditMap)); + return result; } catch (IOException ex) { AUDIT.logWriteFailure(buildAuditMessageForFailure( - OMAction.INITIATE_MULTIPART_UPLOAD, - (keyArgs == null) ? 
null : keyArgs.toAuditMap(), ex)); + OMAction.INITIATE_MULTIPART_UPLOAD, auditMap, ex)); metrics.incNumInitiateMultipartUploadFails(); throw ex; } - return multipartInfo; } @Override public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( OmKeyArgs keyArgs, long clientID) throws IOException { - boolean auditSuccess = false; - OmMultipartCommitUploadPartInfo commitUploadPartInfo; + + Preconditions.checkNotNull(keyArgs); + ResolvedBucket bucket = resolveBucketLink(keyArgs); + + Map auditMap = bucket.audit(keyArgs.toAuditMap()); + + keyArgs = bucket.update(keyArgs); + metrics.incNumCommitMultipartUploadParts(); try { - commitUploadPartInfo = keyManager.commitMultipartUploadPart(keyArgs, - clientID); - auditSuccess = true; + OmMultipartCommitUploadPartInfo result = + keyManager.commitMultipartUploadPart(keyArgs, clientID); + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, auditMap)); + return result; } catch (IOException ex) { - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction - .INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null : keyArgs - .toAuditMap(), ex)); + AUDIT.logWriteFailure(buildAuditMessageForFailure( + OMAction.INITIATE_MULTIPART_UPLOAD, auditMap, ex)); metrics.incNumCommitMultipartUploadPartFails(); throw ex; - } finally { - if (auditSuccess) { - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, (keyArgs == null) ? null : - keyArgs.toAuditMap())); - } } - return commitUploadPartInfo; } @Override public OmMultipartUploadCompleteInfo completeMultipartUpload( OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList) throws IOException { - OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; - metrics.incNumCompleteMultipartUploads(); - Map auditMap = (omKeyArgs == null) ? new LinkedHashMap<>() : - omKeyArgs.toAuditMap(); + Preconditions.checkNotNull(omKeyArgs); + ResolvedBucket bucket = resolveBucketLink(omKeyArgs); + + Map auditMap = bucket.audit(omKeyArgs.toAuditMap()); auditMap.put(OzoneConsts.MULTIPART_LIST, multipartUploadList .getMultipartMap().toString()); + + omKeyArgs = bucket.update(omKeyArgs); + + metrics.incNumCompleteMultipartUploads(); try { - omMultipartUploadCompleteInfo = keyManager.completeMultipartUpload( - omKeyArgs, multipartUploadList); + OmMultipartUploadCompleteInfo result = keyManager.completeMultipartUpload( + omKeyArgs, multipartUploadList); AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction .COMPLETE_MULTIPART_UPLOAD, auditMap)); - return omMultipartUploadCompleteInfo; + return result; } catch (IOException ex) { metrics.incNumCompleteMultipartUploadFails(); AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction @@ -2599,8 +2708,13 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( @Override public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { - Map auditMap = (omKeyArgs == null) ? 
new LinkedHashMap<>() : - omKeyArgs.toAuditMap(); + Preconditions.checkNotNull(omKeyArgs); + ResolvedBucket bucket = resolveBucketLink(omKeyArgs); + + Map auditMap = bucket.audit(omKeyArgs.toAuditMap()); + + omKeyArgs = bucket.update(omKeyArgs); + metrics.incNumAbortMultipartUploads(); try { keyManager.abortMultipartUpload(omKeyArgs); @@ -2616,22 +2730,24 @@ public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { } @Override - public OmMultipartUploadListParts listParts(String volumeName, - String bucketName, String keyName, String uploadID, int partNumberMarker, - int maxParts) throws IOException { - Map auditMap = new HashMap<>(); - auditMap.put(OzoneConsts.VOLUME, volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); + public OmMultipartUploadListParts listParts(final String volumeName, + final String bucketName, String keyName, String uploadID, + int partNumberMarker, int maxParts) throws IOException { + + ResolvedBucket bucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + + Map auditMap = bucket.audit(); auditMap.put(OzoneConsts.KEY, keyName); auditMap.put(OzoneConsts.UPLOAD_ID, uploadID); auditMap.put(OzoneConsts.PART_NUMBER_MARKER, Integer.toString(partNumberMarker)); auditMap.put(OzoneConsts.MAX_PARTS, Integer.toString(maxParts)); + metrics.incNumListMultipartUploadParts(); try { OmMultipartUploadListParts omMultipartUploadListParts = - keyManager.listParts(volumeName, bucketName, keyName, uploadID, - partNumberMarker, maxParts); + keyManager.listParts(bucket.realVolume(), bucket.realBucket(), + keyName, uploadID, partNumberMarker, maxParts); AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction .LIST_MULTIPART_UPLOAD_PARTS, auditMap)); return omMultipartUploadListParts; @@ -2647,15 +2763,16 @@ public OmMultipartUploadListParts listParts(String volumeName, public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws IOException { - Map auditMap = new HashMap<>(); - auditMap.put(OzoneConsts.VOLUME, volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); + ResolvedBucket bucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + + Map auditMap = bucket.audit(); auditMap.put(OzoneConsts.PREFIX, prefix); metrics.incNumListMultipartUploads(); try { OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads(volumeName, bucketName, prefix); + keyManager.listMultipartUploads(bucket.realVolume(), + bucket.realBucket(), prefix); AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction .LIST_MULTIPART_UPLOADS, auditMap)); return omMultipartUploadList; @@ -2671,26 +2788,26 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, @Override public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { - if (isAclEnabled) { - checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } + ResolvedBucket bucket = resolveBucketLink(args); + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumGetFileStatus(); - return keyManager.getFileStatus(args); + return keyManager.getFileStatus(args, getClientAddress()); } catch (IOException ex) { metrics.incNumGetFileStatusFails(); auditSuccess = false; AUDIT.logReadFailure( - buildAuditMessageForFailure(OMAction.GET_FILE_STATUS, - (args == null) ? 
null : args.toAuditMap(), ex)); + buildAuditMessageForFailure(OMAction.GET_FILE_STATUS, auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logReadSuccess( - buildAuditMessageForSuccess(OMAction.GET_FILE_STATUS, - (args == null) ? null : args.toAuditMap())); + buildAuditMessageForSuccess(OMAction.GET_FILE_STATUS, auditMap)); } } } @@ -2704,11 +2821,13 @@ private ResourceType getResourceType(OmKeyArgs args) { @Override public void createDirectory(OmKeyArgs args) throws IOException { - if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } + ResolvedBucket bucket = resolveBucketLink(args); + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumCreateDirectory(); keyManager.createDirectory(args); @@ -2716,14 +2835,12 @@ public void createDirectory(OmKeyArgs args) throws IOException { metrics.incNumCreateDirectoryFails(); auditSuccess = false; AUDIT.logWriteFailure( - buildAuditMessageForFailure(OMAction.CREATE_DIRECTORY, - (args == null) ? null : args.toAuditMap(), ex)); + buildAuditMessageForFailure(OMAction.CREATE_DIRECTORY, auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(OMAction.CREATE_DIRECTORY, - (args == null) ? null : args.toAuditMap())); + buildAuditMessageForSuccess(OMAction.CREATE_DIRECTORY, auditMap)); } } } @@ -2731,11 +2848,13 @@ public void createDirectory(OmKeyArgs args) throws IOException { @Override public OpenKeySession createFile(OmKeyArgs args, boolean overWrite, boolean recursive) throws IOException { - if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), null); - } + ResolvedBucket bucket = resolveBucketLink(args); + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumCreateFile(); return keyManager.createFile(args, overWrite, recursive); @@ -2743,23 +2862,30 @@ public OpenKeySession createFile(OmKeyArgs args, boolean overWrite, metrics.incNumCreateFileFails(); auditSuccess = false; AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.CREATE_FILE, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.CREATE_FILE, (args == null) ? null : args.toAuditMap())); + OMAction.CREATE_FILE, auditMap)); } } } @Override public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumLookupFile(); return keyManager.lookupFile(args, getClientAddress()); @@ -2767,12 +2893,12 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { metrics.incNumLookupFileFails(); auditSuccess = false; AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.LOOKUP_FILE, - (args == null) ? 
null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.LOOKUP_FILE, (args == null) ? null : args.toAuditMap())); + OMAction.LOOKUP_FILE, auditMap)); } } } @@ -2780,24 +2906,33 @@ public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { @Override public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries) throws IOException { + + ResolvedBucket bucket = resolveBucketLink(args); + if (isAclEnabled) { checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); + bucket.realVolume(), bucket.realBucket(), args.getKeyName()); } + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + args = bucket.update(args); + try { metrics.incNumListStatus(); - return keyManager.listStatus(args, recursive, startKey, numEntries); + return keyManager.listStatus(args, recursive, startKey, numEntries, + getClientAddress()); } catch (Exception ex) { metrics.incNumListStatusFails(); auditSuccess = false; AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_STATUS, - (args == null) ? null : args.toAuditMap(), ex)); + auditMap, ex)); throw ex; } finally { if (auditSuccess) { AUDIT.logReadSuccess(buildAuditMessageForSuccess( - OMAction.LIST_STATUS, (args == null) ? null : args.toAuditMap())); + OMAction.LIST_STATUS, auditMap)); } } } @@ -2984,51 +3119,52 @@ public List getAcl(OzoneObj obj) throws IOException { /** * Download and install latest checkpoint from leader OM. - * If the download checkpoints snapshot index is greater than this OM's - * last applied transaction index, then re-initialize the OM state via this - * checkpoint. Before re-initializing OM state, the OM Ratis server should - * be stopped so that no new transactions can be applied. * * @param leaderId peerNodeID of the leader OM - * @return If checkpoint is installed, return the corresponding termIndex. - * Otherwise, return null. + * @return If checkpoint is installed successfully, return the + * corresponding termIndex. Otherwise, return null. */ - public TermIndex installSnapshot(String leaderId) { + public TermIndex installSnapshotFromLeader(String leaderId) { if (omSnapshotProvider == null) { LOG.error("OM Snapshot Provider is not configured as there are no peer " + "nodes."); return null; } - DBCheckpoint omDBcheckpoint = getDBCheckpointFromLeader(leaderId); - Path newDBlocation = omDBcheckpoint.getCheckpointLocation(); + DBCheckpoint omDBCheckpoint = getDBCheckpointFromLeader(leaderId); + LOG.info("Downloaded checkpoint from Leader {} to the location {}", + leaderId, omDBCheckpoint.getCheckpointLocation()); - LOG.info("Downloaded checkpoint from Leader {}, in to the location {}", - leaderId, newDBlocation); + TermIndex termIndex = null; + try { + termIndex = installCheckpoint(leaderId, omDBCheckpoint); + } catch (Exception ex) { + LOG.error("Failed to install snapshot from Leader OM: {}", ex); + } + return termIndex; + } - // Check if current ratis log index is smaller than the downloaded - // checkpoint transaction index. If yes, proceed by stopping the ratis - // server so that the OM state can be re-initialized. If no, then do not - // proceed with installSnapshot. + /** + * Install checkpoint. If the checkpoints snapshot index is greater than + * OM's last applied transaction index, then re-initialize the OM + * state via this checkpoint. 
Before re-initializing OM state, the OM Ratis + * server should be stopped so that no new transactions can be applied. + */ + TermIndex installCheckpoint(String leaderId, DBCheckpoint omDBCheckpoint) + throws Exception { - OMTransactionInfo omTransactionInfo = null; + Path checkpointLocation = omDBCheckpoint.getCheckpointLocation(); + OMTransactionInfo checkpointTrxnInfo = OzoneManagerRatisUtils + .getTrxnInfoFromCheckpoint(configuration, checkpointLocation); - Path dbDir = newDBlocation.getParent(); - if (dbDir == null) { - LOG.error("Incorrect DB location path {} received from checkpoint.", - newDBlocation); - return null; - } + LOG.info("Installing checkpoint with OMTransactionInfo {}", + checkpointTrxnInfo); - try { - omTransactionInfo = - OzoneManagerRatisUtils.getTransactionInfoFromDownloadedSnapshot( - configuration, dbDir); - } catch (Exception ex) { - LOG.error("Failed during opening downloaded snapshot from " + - "{} to obtain transaction index", newDBlocation, ex); - return null; - } + return installCheckpoint(leaderId, checkpointLocation, checkpointTrxnInfo); + } + + TermIndex installCheckpoint(String leaderId, Path checkpointLocation, + OMTransactionInfo checkpointTrxnInfo) throws Exception { File oldDBLocation = metadataManager.getStore().getDbLocation(); try { @@ -3041,58 +3177,74 @@ public TermIndex installSnapshot(String leaderId) { omRatisServer.getOmStateMachine().pause(); } catch (Exception e) { LOG.error("Failed to stop/ pause the services. Cannot proceed with " + - "installing the new checkpoint.", e); - return null; - } - - //TODO: un-pause SM if any failures and retry? - - long lastAppliedIndex = omRatisServer.getLastAppliedTermIndex().getIndex(); - - boolean canProceed = - OzoneManagerRatisUtils.verifyTransactionInfo(omTransactionInfo, - lastAppliedIndex, leaderId, newDBlocation); - - // If downloaded DB has transaction info less than current one, return. - if (!canProceed) { - return null; + "installing the new checkpoint."); + // During stopServices, if KeyManager was stopped successfully and + // OMMetadataManager stop failed, we should restart the KeyManager. + keyManager.start(configuration); + throw e; } - long leaderIndex = omTransactionInfo.getTransactionIndex(); - long leaderTerm = omTransactionInfo.getCurrentTerm(); + File dbBackup = null; + TermIndex termIndex = omRatisServer.getLastAppliedTermIndex(); + long term = termIndex.getTerm(); + long lastAppliedIndex = termIndex.getIndex(); + // Check if current applied log index is smaller than the downloaded + // checkpoint transaction index. If yes, proceed by stopping the ratis + // server so that the OM state can be re-initialized. If no then do not + // proceed with installSnapshot. 
+ boolean canProceed = OzoneManagerRatisUtils.verifyTransactionInfo( + checkpointTrxnInfo, lastAppliedIndex, leaderId, checkpointLocation); - File dbBackup; - try { - dbBackup = replaceOMDBWithCheckpoint(lastAppliedIndex, oldDBLocation, - newDBlocation); - } catch (Exception e) { - LOG.error("OM DB checkpoint replacement with new downloaded checkpoint " + - "failed.", e); - return null; + if (canProceed) { + try { + dbBackup = replaceOMDBWithCheckpoint(lastAppliedIndex, oldDBLocation, + checkpointLocation); + term = checkpointTrxnInfo.getTerm(); + lastAppliedIndex = checkpointTrxnInfo.getTransactionIndex(); + LOG.info("Replaced DB with checkpoint from OM: {}, term: {}, index: {}", + leaderId, term, lastAppliedIndex); + } catch (Exception e) { + LOG.error("Failed to install Snapshot from {} as OM failed to replace" + + " DB with downloaded checkpoint. Reloading old OM state.", e); + } + } else { + LOG.warn("Cannot proceed with InstallSnapshot as OM is at TermIndex {} " + + "and checkpoint has lower TermIndex {}. Reloading old state of OM.", + termIndex, checkpointTrxnInfo.getTermIndex()); } // Reload the OM DB store with the new checkpoint. // Restart (unpause) the state machine and update its last applied index // to the installed checkpoint's snapshot index. try { - reloadOMState(leaderIndex, leaderTerm); - omRatisServer.getOmStateMachine().unpause(leaderIndex, leaderTerm); - } catch (IOException e) { - LOG.error("Failed to reload OM state with new DB checkpoint.", e); - return null; + reloadOMState(lastAppliedIndex, term); + omRatisServer.getOmStateMachine().unpause(lastAppliedIndex, term); + LOG.info("Reloaded OM state with Term: {} and Index: {}", term, + lastAppliedIndex); + } catch (IOException ex) { + String errorMsg = "Failed to reload OM state and instantiate services."; + exitManager.exitSystem(1, errorMsg, ex, LOG); } // Delete the backup DB try { - FileUtils.deleteFully(dbBackup); + if (dbBackup != null) { + FileUtils.deleteFully(dbBackup); + } } catch (IOException e) { LOG.error("Failed to delete the backup of the original DB {}", dbBackup); } + if (lastAppliedIndex != checkpointTrxnInfo.getTransactionIndex()) { + // Install Snapshot failed and old state was reloaded. Return null to + // Ratis to indicate that installation failed. + return null; + } + // TODO: We should only return the snpashotIndex to the leader. // Should be fixed after RATIS-586 - TermIndex newTermIndex = TermIndex.newTermIndex(leaderTerm, leaderIndex); + TermIndex newTermIndex = TermIndex.newTermIndex(term, lastAppliedIndex); return newTermIndex; } @@ -3126,16 +3278,17 @@ void stopServices() throws Exception { * * @param lastAppliedIndex the last applied index in the current OM DB. 
* @param checkpointPath path to the new DB checkpoint - * @return location of the backup of the original DB + * @return location of backup of the original DB * @throws Exception */ File replaceOMDBWithCheckpoint(long lastAppliedIndex, File oldDB, - Path checkpointPath) throws Exception { + Path checkpointPath) throws IOException { // Take a backup of the current DB String dbBackupName = OzoneConsts.OM_DB_BACKUP_PREFIX + lastAppliedIndex + "_" + System.currentTimeMillis(); - File dbBackup = new File(oldDB.getParentFile(), dbBackupName); + File dbDir = oldDB.getParentFile(); + File dbBackup = new File(dbDir, dbBackupName); try { Files.move(oldDB.toPath(), dbBackup.toPath()); @@ -3146,13 +3299,28 @@ File replaceOMDBWithCheckpoint(long lastAppliedIndex, File oldDB, } // Move the new DB checkpoint into the om metadata dir + Path markerFile = new File(dbDir, DB_TRANSIENT_MARKER).toPath(); try { + // Create a Transient Marker file. This file will be deleted if the + // checkpoint DB is successfully moved to the old DB location or if the + // old DB backup is reset to its location. If not, then the OM DB is in + // an inconsistent state and this marker file will fail OM from + // starting up. + Files.createFile(markerFile); Files.move(checkpointPath, oldDB.toPath()); + Files.deleteIfExists(markerFile); } catch (IOException e) { LOG.error("Failed to move downloaded DB checkpoint {} to metadata " + "directory {}. Resetting to original DB.", checkpointPath, oldDB.toPath()); - Files.move(dbBackup.toPath(), oldDB.toPath()); + try { + Files.move(dbBackup.toPath(), oldDB.toPath()); + Files.deleteIfExists(markerFile); + } catch (IOException ex) { + String errorMsg = "Failed to reset to original DB. OM is in an " + + "inconsistent state."; + ExitUtils.terminate(1, errorMsg, ex, LOG); + } throw e; } return dbBackup; @@ -3314,4 +3482,161 @@ private void startJVMPauseMonitor() { jvmPauseMonitor.init(configuration); jvmPauseMonitor.start(); } + + public ResolvedBucket resolveBucketLink(KeyArgs args) throws IOException { + return resolveBucketLink( + Pair.of(args.getVolumeName(), args.getBucketName())); + } + + public ResolvedBucket resolveBucketLink(OmKeyArgs args) + throws IOException { + return resolveBucketLink( + Pair.of(args.getVolumeName(), args.getBucketName())); + } + + public ResolvedBucket resolveBucketLink(Pair requested) + throws IOException { + Pair resolved = + resolveBucketLink(requested, new HashSet<>()); + return new ResolvedBucket(requested, resolved); + } + + /** + * Resolves bucket symlinks. Read permission is required for following links. + * + * @param volumeAndBucket the bucket to be resolved (if it is a link) + * @param visited collects link buckets visited during the resolution to + * avoid infinite loops + * @return bucket location possibly updated with its actual volume and bucket + * after following bucket links + * @throws IOException (most likely OMException) if ACL check fails, bucket is + * not found, loop is detected in the links, etc. 
+ */ + private Pair resolveBucketLink( + Pair volumeAndBucket, + Set> visited) throws IOException { + + String volumeName = volumeAndBucket.getLeft(); + String bucketName = volumeAndBucket.getRight(); + OmBucketInfo info = bucketManager.getBucketInfo(volumeName, bucketName); + if (!info.isLink()) { + return volumeAndBucket; + } + + if (!visited.add(volumeAndBucket)) { + throw new OMException("Detected loop in bucket links", + DETECTED_LOOP_IN_BUCKET_LINKS); + } + + if (isAclEnabled) { + checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, + volumeName, bucketName, null); + } + + return resolveBucketLink( + Pair.of(info.getSourceVolume(), info.getSourceBucket()), + visited); + } + + @VisibleForTesting + void setExitManagerForTesting(ExitManager exitManagerForTesting) { + this.exitManager = exitManagerForTesting; + } + + + public boolean getEnableFileSystemPaths() { + return configuration.getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, + OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); + } + + /** + * Create volume which is required for S3Gateway operations. + * @throws IOException + */ + private void addS3GVolumeToDB() throws IOException { + String s3VolumeName = HddsClientUtils.getS3VolumeName(configuration); + String dbVolumeKey = metadataManager.getVolumeKey(s3VolumeName); + + if (!s3VolumeName.equals(OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT)) { + LOG.warn("Make sure that all S3Gateway use same volume name." + + " Otherwise user need to manually create/configure Volume " + + "configured by S3Gateway"); + } + if (!metadataManager.getVolumeTable().isExist(dbVolumeKey)) { + long transactionID = (Long.MAX_VALUE - 1) >> 8; + long objectID = OMFileRequest.getObjIDFromTxId(transactionID); + String userName = + UserGroupInformation.getCurrentUser().getShortUserName(); + + // Add volume and user info to DB and cache. + + OmVolumeArgs omVolumeArgs = createS3VolumeInfo(s3VolumeName, + transactionID, objectID); + + String dbUserKey = metadataManager.getUserKey(userName); + UserVolumeInfo userVolumeInfo = UserVolumeInfo.newBuilder() + .setObjectID(objectID) + .setUpdateID(transactionID) + .addVolumeNames(s3VolumeName).build(); + + + // Commit to DB. + BatchOperation batchOperation = + metadataManager.getStore().initBatchOperation(); + + metadataManager.getVolumeTable().putWithBatch(batchOperation, dbVolumeKey, + omVolumeArgs); + metadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, + userVolumeInfo); + + metadataManager.getStore().commitBatchOperation(batchOperation); + + // Add to cache. + metadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(dbVolumeKey), + new CacheValue<>(Optional.of(omVolumeArgs), transactionID)); + metadataManager.getUserTable().addCacheEntry( + new CacheKey<>(dbUserKey), + new CacheValue<>(Optional.of(userVolumeInfo), transactionID)); + LOG.info("Created Volume {} With Owner {} required for S3Gateway " + + "operations.", s3VolumeName, userName); + } + } + + private OmVolumeArgs createS3VolumeInfo(String s3Volume, long transactionID, + long objectID) throws IOException { + String userName = UserGroupInformation.getCurrentUser().getShortUserName(); + long time = Time.now(); + + OmVolumeArgs.Builder omVolumeArgs = new OmVolumeArgs.Builder() + .setVolume(s3Volume) + .setUpdateID(transactionID) + .setObjectID(objectID) + .setCreationTime(time) + .setModificationTime(time) + .setOwnerName(userName) + .setAdminName(userName) + .setQuotaInBytes(OzoneConsts.MAX_QUOTA_IN_BYTES); + + // Provide ACLType of ALL which is default acl rights for user and group. 
+ List listOfAcls = new ArrayList<>(); + //User ACL + listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, + userName, ACLType.ALL, ACCESS)); + //Group ACLs of the User + List userGroups = Arrays.asList(UserGroupInformation + .createRemoteUser(userName).getGroupNames()); + + userGroups.stream().forEach((group) -> listOfAcls.add( + new OzoneAcl(ACLIdentityType.GROUP, group, ACLType.ALL, ACCESS))); + + // Add ACLs + for (OzoneAcl ozoneAcl : listOfAcls) { + omVolumeArgs.addOzoneAcls(OzoneAcl.toProtobuf(ozoneAcl)); + } + + return omVolumeArgs.build(); + + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java new file mode 100644 index 000000000000..fef9b2e35a27 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Bundles information about a bucket, which is possibly a symlink, + * and the real bucket that it resolves to, if it is indeed a link. + * For regular buckets, both {@code requested} and {@code resolved} point to + * the same bucket. + */ +public class ResolvedBucket { + + private final Pair requested; + private final Pair resolved; + + public ResolvedBucket(Pair requested, + Pair resolved) { + this.requested = requested; + this.resolved = resolved; + } + + public Pair requested() { + return requested; + } + + public Pair resolved() { + return resolved; + } + + public String requestedVolume() { + return requested.getLeft(); + } + + public String requestedBucket() { + return requested.getRight(); + } + + public String realVolume() { + return resolved.getLeft(); + } + + public String realBucket() { + return resolved.getRight(); + } + + public OmKeyArgs update(OmKeyArgs args) { + return isLink() + ? args.toBuilder() + .setVolumeName(realVolume()) + .setBucketName(realBucket()) + .build() + : args; + } + + public KeyArgs update(KeyArgs args) { + return isLink() + ? args.toBuilder() + .setVolumeName(realVolume()) + .setBucketName(realBucket()) + .build() + : args; + } + + public boolean isLink() { + return !Objects.equals(requested, resolved); + } + + public Map audit() { + return audit(new LinkedHashMap<>()); + } + + /** + * Adds audit information about the bucket (and if it's a link, then the + * real bucket, too) to {@code auditMap}. 
+ * @return the same map for convenience + */ + public Map audit(Map auditMap) { + auditMap.putIfAbsent(OzoneConsts.VOLUME, requestedVolume()); + auditMap.putIfAbsent(OzoneConsts.BUCKET, requestedBucket()); + if (isLink()) { + auditMap.put(OzoneConsts.SOURCE_VOLUME, realVolume()); + auditMap.put(OzoneConsts.SOURCE_BUCKET, realBucket()); + } + return auditMap; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java index f46afde8d983..f6d04a971d63 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.db.LongCodec; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -31,6 +32,7 @@ import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; +import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; @@ -46,7 +48,7 @@ public class OMDBDefinition implements DBDefinition { String.class, new StringCodec(), RepeatedOmKeyInfo.class, - new RepeatedOmKeyInfoCodec()); + new RepeatedOmKeyInfoCodec(true)); public static final DBColumnFamilyDefinition @@ -83,7 +85,7 @@ public class OMDBDefinition implements DBDefinition { String.class, new StringCodec(), OmKeyInfo.class, - new OmKeyInfoCodec()); + new OmKeyInfoCodec(true)); public static final DBColumnFamilyDefinition KEY_TABLE = @@ -92,7 +94,7 @@ public class OMDBDefinition implements DBDefinition { String.class, new StringCodec(), OmKeyInfo.class, - new OmKeyInfoCodec()); + new OmKeyInfoCodec(true)); public static final DBColumnFamilyDefinition BUCKET_TABLE = @@ -139,6 +141,15 @@ public class OMDBDefinition implements DBDefinition { S3SecretValue.class, new S3SecretValueCodec()); + public static final DBColumnFamilyDefinition + TRANSACTION_INFO_TABLE = + new DBColumnFamilyDefinition<>( + OmMetadataManagerImpl.TRANSACTION_INFO_TABLE, + String.class, + new StringCodec(), + OMTransactionInfo.class, + new OMTransactionInfoCodec()); + @Override public String getName() { @@ -155,7 +166,7 @@ public DBColumnFamilyDefinition[] getColumnFamilies() { return new DBColumnFamilyDefinition[] {DELETED_TABLE, USER_TABLE, VOLUME_TABLE, S3_TABLE, OPEN_KEY_TABLE, KEY_TABLE, BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE, - S3_SECRET_TABLE}; + S3_SECRET_TABLE, TRANSACTION_INFO_TABLE}; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java index c6b86bb4a5e4..a7e1eabee7ba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java @@ -18,23 +18,34 @@ package org.apache.hadoop.ozone.om.codec; import java.io.IOException; + import 
org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.hdds.utils.db.Codec; import com.google.common.base.Preconditions; import com.google.protobuf.InvalidProtocolBufferException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Codec to encode OmKeyInfo as byte array. */ public class OmKeyInfoCodec implements Codec { + private static final Logger LOG = + LoggerFactory.getLogger(OmKeyInfoCodec.class); + + private final boolean ignorePipeline; + public OmKeyInfoCodec(boolean ignorePipeline) { + this.ignorePipeline = ignorePipeline; + LOG.info("OmKeyInfoCodec ignorePipeline = " + ignorePipeline); + } @Override public byte[] toPersistedFormat(OmKeyInfo object) throws IOException { Preconditions .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProtobuf().toByteArray(); + return object.getProtobuf(ignorePipeline).toByteArray(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java index 1907b790b582..9156fdf03213 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java @@ -22,6 +22,8 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .RepeatedKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -29,12 +31,21 @@ * Codec to encode RepeatedOmKeyInfo as byte array. */ public class RepeatedOmKeyInfoCodec implements Codec { + private static final Logger LOG = + LoggerFactory.getLogger(RepeatedOmKeyInfoCodec.class); + + private final boolean ignorePipeline; + public RepeatedOmKeyInfoCodec(boolean ignorePipeline) { + this.ignorePipeline = ignorePipeline; + LOG.info("RepeatedOmKeyInfoCodec ignorePipeline = " + ignorePipeline); + } + @Override public byte[] toPersistedFormat(RepeatedOmKeyInfo object) throws IOException { Preconditions.checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProto().toByteArray(); + return object.getProto(ignorePipeline).toByteArray(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java index 22656d887b66..592cae3a366c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java @@ -19,10 +19,12 @@ import com.google.common.base.Preconditions; import com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.hdds.utils.db.Codec; import java.io.IOException; +import java.nio.BufferUnderflowException; /** * Codec to encode TokenIdentifierCodec as byte array. 
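The OmKeyInfoCodec and RepeatedOmKeyInfoCodec constructors above now take an ignorePipeline flag that decides, at construction time, whether transient pipeline details are included in the bytes written to the DB. A minimal, self-contained sketch of that pattern follows; the class and field names (KeyRecord, SimpleCodec, KeyRecordCodec) are illustrative stand-ins, not the real Ozone types.
```
import java.nio.charset.StandardCharsets;

public final class IgnorePipelineCodecSketch {

  /** Stand-in for the persisted key record (the real type is OmKeyInfo). */
  static final class KeyRecord {
    final String keyName;
    final String pipeline; // transient placement info, re-resolvable at read time

    KeyRecord(String keyName, String pipeline) {
      this.keyName = keyName;
      this.pipeline = pipeline;
    }
  }

  /** Stand-in for the DB codec contract: object -> byte[]. */
  interface SimpleCodec<T> {
    byte[] toPersistedFormat(T value);
  }

  /** Codec that can drop the pipeline part when persisting, like OmKeyInfoCodec(true). */
  static final class KeyRecordCodec implements SimpleCodec<KeyRecord> {
    private final boolean ignorePipeline;

    KeyRecordCodec(boolean ignorePipeline) {
      this.ignorePipeline = ignorePipeline;
    }

    @Override
    public byte[] toPersistedFormat(KeyRecord value) {
      // With ignorePipeline set, only the durable part of the record is written.
      String persisted = ignorePipeline
          ? value.keyName
          : value.keyName + "|" + value.pipeline;
      return persisted.getBytes(StandardCharsets.UTF_8);
    }
  }

  public static void main(String[] args) {
    KeyRecord key = new KeyRecord("vol1/bucket1/key1", "RATIS-THREE:dn1,dn2,dn3");
    int withPipeline = new KeyRecordCodec(false).toPersistedFormat(key).length;
    int withoutPipeline = new KeyRecordCodec(true).toPersistedFormat(key).length;
    System.out.println(withPipeline + " bytes vs " + withoutPipeline + " bytes");
  }
}
```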
@@ -33,7 +35,7 @@ public class TokenIdentifierCodec implements Codec { public byte[] toPersistedFormat(OzoneTokenIdentifier object) { Preconditions .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getBytes(); + return object.toUniqueSerializedKey(); } @Override @@ -42,8 +44,16 @@ public OzoneTokenIdentifier fromPersistedFormat(byte[] rawData) Preconditions.checkNotNull(rawData, "Null byte array can't converted to real object."); try { - return OzoneTokenIdentifier.readProtoBuf(rawData); - } catch (InvalidProtocolBufferException e) { + OzoneTokenIdentifier object = OzoneTokenIdentifier.newInstance(); + return object.fromUniqueSerializedKey(rawData); + } catch (IOException ex) { + try { + return OzoneTokenIdentifier.readProtoBuf(rawData); + } catch (InvalidProtocolBufferException e) { + throw new IllegalArgumentException( + "Can't encode the the raw data from the byte array", e); + } + } catch (BufferUnderflowException e) { throw new IllegalArgumentException( "Can't encode the the raw data from the byte array", e); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java index 647931af0d04..4bcba45b6b9c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java @@ -31,7 +31,18 @@ * Ozone Manager FileSystem interface. */ public interface OzoneManagerFS extends IOzoneAcl { - OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException; + + /** + * Get file status for a file or a directory. + * + * @param args the args of the key provided by client. + * @param clientAddress a hint to key manager, order the datanode in returned + * pipeline by distance between client and datanode. + * @return file status. + * @throws IOException + */ + OzoneFileStatus getFileStatus(OmKeyArgs args, String clientAddress) + throws IOException; void createDirectory(OmKeyArgs args) throws IOException; @@ -49,6 +60,21 @@ OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, */ OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) throws IOException; + /** + * List the status for a file or a directory and its contents. + * + * @param keyArgs the args of the key provided by client. + * @param recursive For a directory if true all the descendants of a + * particular directory are listed + * @param startKey Key from which listing needs to start. If startKey + * exists its status is included in the final list. + * @param numEntries Number of entries to list from the start key + * @param clientAddress a hint to key manager, order the datanode in returned + * pipeline by distance between client and datanode. 
+ * @return list of file status + * @throws IOException + */ List listStatus(OmKeyArgs keyArgs, boolean recursive, - String startKey, long numEntries) throws IOException; + String startKey, long numEntries, String clientAddress) + throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java index 24417515ef13..9b225c82a819 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.Objects; +import org.apache.ratis.server.protocol.TermIndex; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_SPLIT_KEY; @@ -33,7 +34,7 @@ */ public final class OMTransactionInfo { - private long currentTerm; // term associated with the ratis log index. + private long term; // term associated with the ratis log index. // Transaction index corresponds to ratis log index private long transactionIndex; @@ -43,12 +44,12 @@ private OMTransactionInfo(String transactionInfo) { Preconditions.checkState(tInfo.length==2, "Incorrect TransactionInfo value"); - currentTerm = Long.parseLong(tInfo[0]); + term = Long.parseLong(tInfo[0]); transactionIndex = Long.parseLong(tInfo[1]); } private OMTransactionInfo(long currentTerm, long transactionIndex) { - this.currentTerm = currentTerm; + this.term = currentTerm; this.transactionIndex = transactionIndex; } @@ -56,8 +57,8 @@ private OMTransactionInfo(long currentTerm, long transactionIndex) { * Get current term. * @return currentTerm */ - public long getCurrentTerm() { - return currentTerm; + public long getTerm() { + return term; } /** @@ -68,6 +69,10 @@ public long getTransactionIndex() { return transactionIndex; } + public TermIndex getTermIndex() { + return TermIndex.newTermIndex(term, transactionIndex); + } + /** * Generate String form of transaction info which need to be persisted in OM * DB finally in byte array. 
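The OMTransactionInfo changes above rename currentTerm to term and add getTermIndex(), and they rely on the term and transaction index surviving a round-trip through a single delimited string in the DB. A small sketch of that round-trip is below; the "#" delimiter is only an assumed placeholder for the real TRANSACTION_INFO_SPLIT_KEY constant, which is defined elsewhere and may differ.
```
public final class TransactionInfoSketch {
  private static final String SPLIT_KEY = "#"; // assumed stand-in delimiter

  private final long term;
  private final long transactionIndex;

  TransactionInfoSketch(long term, long transactionIndex) {
    this.term = term;
    this.transactionIndex = transactionIndex;
  }

  /** Serialize as "<term><delimiter><index>", mirroring generateTransactionInfo(). */
  String toPersistedString() {
    return term + SPLIT_KEY + transactionIndex;
  }

  /** Parse the persisted form back, mirroring the String-based constructor. */
  static TransactionInfoSketch fromPersistedString(String value) {
    String[] parts = value.split(SPLIT_KEY);
    if (parts.length != 2) {
      throw new IllegalStateException("Incorrect TransactionInfo value: " + value);
    }
    return new TransactionInfoSketch(
        Long.parseLong(parts[0]), Long.parseLong(parts[1]));
  }

  public static void main(String[] args) {
    TransactionInfoSketch info = new TransactionInfoSketch(3, 12345);
    String persisted = info.toPersistedString();               // "3#12345"
    TransactionInfoSketch parsed = fromPersistedString(persisted);
    System.out.println(parsed.toPersistedString().equals(persisted)); // true
  }
}
```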
@@ -75,7 +80,7 @@ public long getTransactionIndex() { */ private String generateTransactionInfo() { StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append(currentTerm); + stringBuilder.append(term); stringBuilder.append(TRANSACTION_INFO_SPLIT_KEY); stringBuilder.append(transactionIndex); @@ -109,13 +114,18 @@ public boolean equals(Object o) { return false; } OMTransactionInfo that = (OMTransactionInfo) o; - return currentTerm == that.currentTerm && + return term == that.term && transactionIndex == that.transactionIndex; } @Override public int hashCode() { - return Objects.hash(currentTerm, transactionIndex); + return Objects.hash(term, transactionIndex); + } + + @Override + public String toString() { + return generateTransactionInfo(); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 2a98db6d40db..d6d2be6ed8e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -407,6 +407,7 @@ private RaftProperties newRaftProperties(ConfigurationSource conf) { StorageUnit.BYTES); RaftServerConfigKeys.Log.setSegmentSizeMax(properties, SizeInBytes.valueOf(raftSegmentSize)); + RaftServerConfigKeys.Log.setPurgeUptoSnapshotIndex(properties, true); // Set RAFT segment pre-allocated size final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 3aff87a3941b..93e29eeb5f4f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -19,7 +19,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.protobuf.ServiceException; import java.io.IOException; @@ -64,7 +63,6 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.METADATA_ERROR; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.REPLAY; /** * The OM StateMachine is the state machine for OM Ratis server. It is @@ -88,7 +86,7 @@ public class OzoneManagerStateMachine extends BaseStateMachine { private final boolean isTracingEnabled; // Map which contains index and term for the ratis transactions which are - // stateMachine entries which are recived through applyTransaction. + // stateMachine entries which are received through applyTransaction. 
private ConcurrentMap applyTransactionMap = new ConcurrentSkipListMap<>(); @@ -258,12 +256,6 @@ public CompletableFuture applyTransaction(TransactionContext trx) { terminate(omResponse, OMException.ResultCodes.INTERNAL_ERROR); } else if (omResponse.getStatus() == METADATA_ERROR) { terminate(omResponse, OMException.ResultCodes.METADATA_ERROR); - } else if (omResponse.getStatus() == REPLAY) { - // For replay we do not add response to double buffer, so update - // LastAppliedIndex for the replay transactions here. - computeAndUpdateLastAppliedIndex(trxLogIndex, - trx.getLogEntry().getTerm(), Lists.newArrayList(trxLogIndex), - true); } } @@ -371,23 +363,13 @@ public long takeSnapshot() throws IOException { public CompletableFuture notifyInstallSnapshotFromLeader( RaftProtos.RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()) - .toString(); - - LOG.info("Received install snapshot notificaiton form OM leader: {} with " + + String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getFollowerInfo() + .getLeaderInfo().getId().getId()).toString(); + LOG.info("Received install snapshot notification from OM leader: {} with " + "term index: {}", leaderNodeId, firstTermIndexInLog); - if (!roleInfoProto.getRole().equals(RaftProtos.RaftPeerRole.LEADER)) { - // A non-leader Ratis server should not send this notification. - LOG.error("Received Install Snapshot notification from non-leader OM " + - "node: {}. Ignoring the notification.", leaderNodeId); - return completeExceptionally(new OMException("Received notification to " + - "install snaphost from non-leader OM node", - OMException.ResultCodes.RATIS_ERROR)); - } - CompletableFuture future = CompletableFuture.supplyAsync( - () -> ozoneManager.installSnapshot(leaderNodeId), + () -> ozoneManager.installSnapshotFromLeader(leaderNodeId), installSnapshotExecutor); return future; } @@ -529,9 +511,9 @@ public void loadSnapshotInfoFromDB() throws IOException { ozoneManager.getMetadataManager()); if (omTransactionInfo != null) { setLastAppliedTermIndex(TermIndex.newTermIndex( - omTransactionInfo.getCurrentTerm(), + omTransactionInfo.getTerm(), omTransactionInfo.getTransactionIndex())); - snapshotInfo.updateTermIndex(omTransactionInfo.getCurrentTerm(), + snapshotInfo.updateTermIndex(omTransactionInfo.getTerm(), omTransactionInfo.getTransactionIndex()); } LOG.info("LastAppliedIndex is set from TransactionInfo from OM DB as {}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 4aaaf13f6a4d..681c0da87e6d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest; +import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest; import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest; @@ -129,6 +130,8 @@ public static OMClientRequest createClientRequest(OMRequest 
omRequest) { return new OMKeysDeleteRequest(omRequest); case RenameKey: return new OMKeyRenameRequest(omRequest); + case RenameKeys: + return new OMKeysRenameRequest(omRequest); case CreateDirectory: return new OMDirectoryCreateRequest(omRequest); case CreateFile: @@ -228,15 +231,35 @@ public static Status exceptionToResponseStatus(IOException exception) { } /** - * Obtain Transaction info from downloaded snapshot DB. + * Obtain OMTransactionInfo from Checkpoint. + */ + public static OMTransactionInfo getTrxnInfoFromCheckpoint( + OzoneConfiguration conf, Path dbPath) throws Exception { + + if (dbPath != null) { + Path dbDir = dbPath.getParent(); + Path dbFile = dbPath.getFileName(); + if (dbDir != null && dbFile != null) { + return getTransactionInfoFromDB(conf, dbDir, dbFile.toString()); + } + } + + throw new IOException("Checkpoint " + dbPath + " does not have proper " + + "DB location"); + } + + /** + * Obtain Transaction info from DB. * @param tempConfig + * @param dbDir path to DB * @return OMTransactionInfo * @throws Exception */ - public static OMTransactionInfo getTransactionInfoFromDownloadedSnapshot( - OzoneConfiguration tempConfig, Path dbDir) throws Exception { - DBStore dbStore = - OmMetadataManagerImpl.loadDB(tempConfig, dbDir.toFile()); + private static OMTransactionInfo getTransactionInfoFromDB( + OzoneConfiguration tempConfig, Path dbDir, String dbName) + throws Exception { + DBStore dbStore = OmMetadataManagerImpl.loadDB(tempConfig, dbDir.toFile(), + dbName); Table transactionInfoTable = dbStore.getTable(TRANSACTION_INFO_TABLE, @@ -245,8 +268,11 @@ public static OMTransactionInfo getTransactionInfoFromDownloadedSnapshot( OMTransactionInfo omTransactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY); dbStore.close(); - OzoneManager.LOG.info("Downloaded checkpoint with OMTransactionInfo {}", - omTransactionInfo); + + if (omTransactionInfo == null) { + throw new IOException("Failed to read OMTransactionInfo from DB " + + dbName + " at " + dbDir); + } return omTransactionInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 03531440e5bf..0fa9ca1a8d2c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -18,15 +18,9 @@ package org.apache.hadoop.ozone.om.request; -import java.io.IOException; -import java.net.InetAddress; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Set; - import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; - +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ozone.OzoneConsts; @@ -36,25 +30,27 @@ import org.apache.hadoop.ozone.audit.AuditMessage; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.annotation.Nonnull; +import java.io.IOException; +import java.net.InetAddress; +import java.nio.file.Paths; +import java.util.LinkedHashMap; +import java.util.Map; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.REPLAY; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; /** * OMClientRequest provides methods which every write OM request should @@ -62,6 +58,8 @@ */ public abstract class OMClientRequest implements RequestAuditor { + private static final Logger LOG = + LoggerFactory.getLogger(OMClientRequest.class); private OMRequest omRequest; /** @@ -71,8 +69,6 @@ public abstract class OMClientRequest implements RequestAuditor { public enum Result { SUCCESS, // The request was executed successfully - REPLAY, // The request is a replay and was ignored - FAILURE // The request failed and exception was thrown } @@ -224,39 +220,8 @@ protected OMResponse createErrorOMResponse( return omResponse.build(); } - /** - * Set parameters needed for return error response to client. - * - * @param omResponse - * @param ex - IOException - * @param unDeletedKeys - Set - * @return error response need to be returned to client - OMResponse. - */ - protected OMResponse createOperationKeysErrorOMResponse( - @Nonnull OMResponse.Builder omResponse, - @Nonnull IOException ex, @Nonnull Set unDeletedKeys) { - omResponse.setSuccess(false); - StringBuffer errorMsg = new StringBuffer(); - DeleteKeysResponse.Builder resp = DeleteKeysResponse.newBuilder(); - for (OmKeyInfo key : unDeletedKeys) { - if(key != null) { - resp.addUnDeletedKeys(key.getProtobuf()); - } - } - if (errorMsg != null) { - omResponse.setMessage(errorMsg.toString()); - } - // TODO: Currently all delete operations in OzoneBucket.java are void. Here - // we put the List of unDeletedKeys into Response. These KeyInfo can be - // used to continue deletion if client support delete retry. - omResponse.setDeleteKeysResponse(resp.build()); - omResponse.setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex)); - return omResponse.build(); - } - /** * Add the client response to double buffer and set the flush future. - * For responses which has status set to REPLAY it is a no-op. * @param trxIndex * @param omClientResponse * @param omDoubleBufferHelper @@ -265,13 +230,8 @@ protected void addResponseToDoubleBuffer(long trxIndex, OMClientResponse omClientResponse, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { if (omClientResponse != null) { - // For replay transaction we do not need to add to double buffer, as - // for these transactions there is nothing needs to be done for - // addDBToBatch. 
- if (omClientResponse.getOMResponse().getStatus() != REPLAY) { - omClientResponse.setFlushFuture( - omDoubleBufferHelper.add(omClientResponse, trxIndex)); - } + omClientResponse.setFlushFuture( + omDoubleBufferHelper.add(omClientResponse, trxIndex)); } } @@ -314,28 +274,71 @@ public Map buildVolumeAuditMap(String volume) { return auditMap; } - /** - * Check if the transaction is a replay. - * @param ozoneObj OMVolumeArgs or OMBucketInfo or OMKeyInfo object whose - * updateID needs to be compared with - * @param transactionID the current transaction ID - * @return true if transactionID is less than or equal to updateID, false - * otherwise. - */ - protected boolean isReplay(OzoneManager om, WithObjectID ozoneObj, - long transactionID) { - return om.isRatisEnabled() && ozoneObj.isUpdateIDset() && - transactionID <= ozoneObj.getUpdateID(); + + public static String validateAndNormalizeKey(boolean enableFileSystemPaths, + String keyName) throws OMException { + if (enableFileSystemPaths) { + return validateAndNormalizeKey(keyName); + } else { + return keyName; + } + } + + @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME") + public static String validateAndNormalizeKey(String keyName) + throws OMException { + String normalizedKeyName; + if (keyName.startsWith(OM_KEY_PREFIX)) { + normalizedKeyName = Paths.get(keyName).toUri().normalize().getPath(); + } else { + normalizedKeyName = Paths.get(OM_KEY_PREFIX, keyName).toUri() + .normalize().getPath(); + } + if (!keyName.equals(normalizedKeyName)) { + LOG.debug("Normalized key {} to {} ", keyName, + normalizedKeyName.substring(1)); + } + return isValidKeyPath(normalizedKeyName.substring(1)); } /** - * Return a dummy OMClientResponse for when the transactions are replayed. + * Whether the pathname is valid. Check key names which contain a + * ":", ".", "..", "//", "". If it has any of these characters throws + * OMException, else return the path. */ - protected OMResponse createReplayOMResponse( - @Nonnull OMResponse.Builder omResponse) { + private static String isValidKeyPath(String path) throws OMException { + boolean isValid = true; + + // If keyName is empty string throw error. + if (path.length() == 0) { + throw new OMException("Invalid KeyPath, empty keyName" + path, + INVALID_KEY_NAME); + } else if(path.startsWith("/")) { + isValid = false; + } else { + // Check for ".." "." ":" "/" + String[] components = StringUtils.split(path, '/'); + for (int i = 0; i < components.length; i++) { + String element = components[i]; + if (element.equals(".") || + (element.contains(":")) || + (element.contains("/") || element.equals(".."))) { + isValid = false; + break; + } + + // The string may end with a /, but not have + // "//" in the middle. 
+ if (element.isEmpty() && i != components.length - 1) { + isValid = false; + } + } + } - omResponse.setSuccess(false); - omResponse.setStatus(REPLAY); - return omResponse.build(); + if (isValid) { + return path; + } else { + throw new OMException("Invalid KeyPath " + path, INVALID_KEY_NAME); + } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 8181a64e3a72..7c60f6180817 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -69,6 +69,8 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -113,6 +115,20 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo)); } + boolean hasSourceVolume = bucketInfo.hasSourceVolume(); + boolean hasSourceBucket = bucketInfo.hasSourceBucket(); + + if (hasSourceBucket != hasSourceVolume) { + throw new OMException("Both source volume and source bucket are " + + "required for bucket links", + OMException.ResultCodes.INVALID_REQUEST); + } + + if (hasSourceBucket && bucketInfo.hasBeinfo()) { + throw new OMException("Encryption cannot be set for bucket links", + OMException.ResultCodes.INVALID_REQUEST); + } + newCreateBucketRequest.setBucketInfo(newBucketInfo.build()); return getOmRequest().toBuilder().setUserInfo(getUserInfo()) @@ -167,27 +183,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, //Check if the volume exists if (omVolumeArgs == null) { LOG.debug("volume: {} not found ", volumeName); - throw new OMException("Volume doesn't exist", - OMException.ResultCodes.VOLUME_NOT_FOUND); + throw new OMException("Volume doesn't exist", VOLUME_NOT_FOUND); } //Check if bucket already exists - OmBucketInfo dbBucketInfo = metadataManager.getBucketTable() - .getReadCopy(bucketKey); - if (dbBucketInfo != null) { - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, dbBucketInfo, transactionLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", - transactionLogIndex, createBucketRequest); - return new OMBucketCreateResponse(createReplayOMResponse(omResponse)); - } else { - LOG.debug("bucket: {} already exists ", bucketName); - throw new OMException("Bucket already exist", - OMException.ResultCodes.BUCKET_ALREADY_EXISTS); - } + if (metadataManager.getBucketTable().isExist(bucketKey)) { + LOG.debug("bucket: {} already exists ", bucketName); + throw new OMException("Bucket already exist", BUCKET_ALREADY_EXISTS); } // Add objectID and updateID @@ -211,7 +213,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } catch (IOException ex) { exception = ex; omClientResponse = new OMBucketCreateResponse( - createErrorOMResponse(omResponse, exception), omBucketInfo); + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index 18bf3ae3bb13..91aef6a2d44a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.OzoneConsts; @@ -52,6 +51,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; @@ -102,7 +102,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName, null); } - // acquire lock acquiredVolumeLock = omMetadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName); @@ -111,25 +110,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName); // No need to check volume exists here, as bucket cannot be created - // with out volume creation. - //Check if bucket exists + // with out volume creation. Check if bucket exists String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable() - .getReadCopy(bucketKey); - if (omBucketInfo == null) { - LOG.debug("bucket: {} not found ", bucketName); - throw new OMException("Bucket doesn't exist", - OMException.ResultCodes.BUCKET_NOT_FOUND); - } - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omBucketInfo, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", - transactionLogIndex, deleteBucketRequest); - return new OMBucketDeleteResponse(createReplayOMResponse(omResponse)); + if (!omMetadataManager.getBucketTable().isExist(bucketKey)) { + LOG.debug("bucket: {} not found ", bucketName); + throw new OMException("Bucket already exist", BUCKET_NOT_FOUND); } //Check if bucket is empty @@ -155,7 +141,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, success = false; exception = ex; omClientResponse = new OMBucketDeleteResponse( - createErrorOMResponse(omResponse, exception), volumeName, bucketName); + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 2288de73aa8e..d90f08ea7c8c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -121,16 +121,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMException.ResultCodes.BUCKET_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - // If a replay, then the response has already been returned to the - // client. So take no further action and return a dummy OMClientResponse. - if (isReplay(ozoneManager, dbBucketInfo, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, setBucketPropertyRequest); - return new OMBucketSetPropertyResponse( - createReplayOMResponse(omResponse)); - } - OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder(); bucketInfoBuilder.setVolumeName(dbBucketInfo.getVolumeName()) .setBucketName(dbBucketInfo.getBucketName()) @@ -190,7 +180,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, success = false; exception = ex; omClientResponse = new OMBucketSetPropertyResponse( - createErrorOMResponse(omResponse, exception), omBucketInfo); + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java index f162e8836a46..a493f9fa1472 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse; import org.apache.hadoop.ozone.util.BooleanBiFunction; import org.apache.hadoop.ozone.om.request.util.ObjectParser; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -106,16 +105,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new 
OMException(OMException.ResultCodes.BUCKET_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omBucketInfo, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, getOmRequest()); - return new OMBucketAclResponse(createReplayOMResponse(omResponse)); - } - operationResult = omBucketAclOp.apply(ozoneAcls, omBucketInfo); omBucketInfo.setUpdateID(transactionLogIndex, ozoneManager.isRatisEnabled()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 7f860fc2194d..7b2ab51f0c17 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -29,7 +29,6 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -91,8 +90,6 @@ public class OMDirectoryCreateRequest extends OMKeyRequest { public enum Result { SUCCESS, // The request was executed successfully - REPLAY, // The request is a replay and was ignored - DIRECTORY_ALREADY_EXISTS, // Directory key already exists in DB FAILURE // The request failed and exception was thrown @@ -149,6 +146,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, List missingParentInfos; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); @@ -197,34 +198,20 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName, bucketName, Optional.of(dirKeyInfo), Optional.of(missingParentInfos), trxnLogIndex); - - omClientResponse = new OMDirectoryCreateResponse(omResponse.build(), - dirKeyInfo, missingParentInfos); result = Result.SUCCESS; + omClientResponse = new OMDirectoryCreateResponse(omResponse.build(), + dirKeyInfo, missingParentInfos, result); } else { // omDirectoryResult == DIRECTORY_EXITS - // Check if this is a replay of ratis logs - String dirKey = omMetadataManager.getOzoneDirKey(volumeName, - bucketName, keyName); - OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(dirKey); - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - throw new OMReplayException(); - } else { - result = Result.DIRECTORY_ALREADY_EXISTS; - omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS); - omClientResponse = new OMDirectoryCreateResponse(omResponse.build()); - } + result = Result.DIRECTORY_ALREADY_EXISTS; + omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS); + omClientResponse = new OMDirectoryCreateResponse(omResponse.build(), + result); } } catch (IOException ex) { - if (ex instanceof 
OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMDirectoryCreateResponse( - createReplayOMResponse(omResponse)); - } else { - exception = ex; - omClientResponse = new OMDirectoryCreateResponse( - createErrorOMResponse(omResponse, exception)); - } + exception = ex; + omClientResponse = new OMDirectoryCreateResponse( + createErrorOMResponse(omResponse, exception), result); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -234,10 +221,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - if (result != Result.REPLAY) { - auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY, - auditMap, exception, userInfo)); - } + auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY, + auditMap, exception, userInfo)); logResult(createDirectoryRequest, keyArgs, omMetrics, result, trxnLogIndex, exception); @@ -314,12 +299,6 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, volumeName, bucketName, keyName); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - createDirectoryRequest); - } - break; case DIRECTORY_ALREADY_EXISTS: if (LOG.isDebugEnabled()) { LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 4db8f8014f2c..3226f7817797 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; import org.slf4j.Logger; @@ -167,6 +166,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); KeyArgs keyArgs = createFileRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -200,6 +200,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); @@ -216,26 +220,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMException.ResultCodes.NOT_A_FILE); } - // Check if Key already exists in KeyTable and this transaction is a - // replay. String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable() .getIfExist(ozoneKey); + if (dbKeyInfo != null) { - // Check if this transaction is a replay of ratis logs. - // We check only the KeyTable here and not the OpenKeyTable. 
In case - // this transaction is a replay but the transaction was not committed - // to the KeyTable, then we recreate the key in OpenKey table. This is - // okay as all the subsequent transactions would also be replayed and - // the openKey table would eventually reach the same state. - // The reason we do not check the OpenKey table is to avoid a DB read - // in regular non-replay scenario. - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy response. - throw new OMReplayException(); - } + ozoneManager.getKeyManager().refresh(dbKeyInfo); } OMFileRequest.OMPathInfo pathInfo = @@ -312,18 +303,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMFileCreateResponse(createReplayOMResponse( - omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - omMetrics.incNumCreateFileFails(); - omResponse.setCmdType(Type.CreateFile); - omClientResponse = new OMFileCreateResponse(createErrorOMResponse( + result = Result.FAILURE; + exception = ex; + omMetrics.incNumCreateFileFails(); + omResponse.setCmdType(Type.CreateFile); + omClientResponse = new OMFileCreateResponse(createErrorOMResponse( omResponse, exception)); - } } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -334,18 +319,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // Audit Log outside the lock - if (result != Result.REPLAY) { - Map auditMap = buildKeyArgsAuditMap(keyArgs); - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.CREATE_FILE, auditMap, exception, - getOmRequest().getUserInfo())); - } + auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.CREATE_FILE, auditMap, exception, + getOmRequest().getUserInfo())); switch (result) { - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - createFileRequest); - break; case SUCCESS: LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 3367ec7e4758..21ffff815e09 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -197,7 +197,7 @@ public boolean directParentExists() { /** * Return codes used by verifyFilesInPath method. */ - enum OMDirectoryResult { + public enum OMDirectoryResult { // In below examples path is assumed as "a/b/c" in volume volume1 and // bucket b1. 
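
The request classes above now share one control flow: resolve bucket links first, refresh the volume and bucket names from the resolved KeyArgs, run the ACL checks, and keep a single FAILURE branch instead of the old OMReplayException special case. Below is a minimal, self-contained sketch of that flow; the RequestFlowSketch class and the KeyArgs/BucketLinkResolver stubs are hypothetical stand-ins for the Ozone types, not code from this patch.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public final class RequestFlowSketch {

  /** Hypothetical stand-in for the protobuf KeyArgs (volume/bucket/key). */
  static final class KeyArgs {
    final String volumeName;
    final String bucketName;
    final String keyName;

    KeyArgs(String volumeName, String bucketName, String keyName) {
      this.volumeName = volumeName;
      this.bucketName = bucketName;
      this.keyName = keyName;
    }
  }

  /** Hypothetical stand-in for OzoneManager#resolveBucketLink. */
  interface BucketLinkResolver {
    KeyArgs resolve(KeyArgs args, Map<String, String> auditMap) throws IOException;
  }

  /** After this patch the per-request Result enums keep only these values. */
  enum Result { SUCCESS, FAILURE }

  static Result validateAndUpdateCache(BucketLinkResolver resolver, KeyArgs keyArgs) {
    Map<String, String> auditMap = new HashMap<>();
    Result result;
    try {
      // 1. Resolve bucket links before ACL checks and DB lookups, and take
      //    the (possibly rewritten) volume/bucket names from the result.
      keyArgs = resolver.resolve(keyArgs, auditMap);
      String volumeName = keyArgs.volumeName;
      String bucketName = keyArgs.bucketName;

      // 2. checkKeyAcls and the cache updates would run here against the
      //    resolved volumeName/bucketName (omitted in this sketch).
      result = Result.SUCCESS;
    } catch (IOException ex) {
      // 3. One failure branch only: the OMReplayException / REPLAY result
      //    and the createReplayOMResponse() path no longer exist.
      result = Result.FAILURE;
    }
    // 4. Audit logging now runs unconditionally; no "skip if replay" guard.
    return result;
  }
}
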
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 348c96a2ae6d..94d700f271b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -25,7 +25,6 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -111,9 +110,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { ozoneManager.getPreallocateBlocksMax(), ozoneManager.isGrpcBlockTokenEnabled(), ozoneManager.getOMNodeId()); - // Set modification time + // Set modification time and normalize key if required. KeyArgs.Builder newKeyArgs = keyArgs.toBuilder() - .setModificationTime(Time.now()); + .setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName())); AllocateBlockRequest.Builder newAllocatedBlockRequest = AllocateBlockRequest.newBuilder() @@ -162,18 +163,20 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - String openKeyName = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); + String openKeyName = null; OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); OMClientResponse omClientResponse = null; - OmKeyInfo openKeyInfo = null; + OmKeyInfo openKeyInfo; IOException exception = null; - Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, allocateBlockRequest.getClientID()); @@ -184,32 +187,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // Here we don't acquire bucket/volume lock because for a single client // allocateBlock is called in serial fashion. + openKeyName = omMetadataManager.getOpenKey(volumeName, bucketName, + keyName, clientID); openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKeyName); if (openKeyInfo == null) { - // Check if this transaction is a replay of ratis logs. - // If the Key was already committed and this transaction is being - // replayed, we should ignore this transaction. - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, - bucketName, keyName); - OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - if (dbKeyInfo != null) { - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - // This transaction is a replay. Send replay response. - throw new OMReplayException(); - } - } throw new OMException("Open Key not found " + openKeyName, KEY_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. 
- // Check the updateID of the openKey to verify that it is not greater - // than the current transactionLogIndex - if (isReplay(ozoneManager, openKeyInfo, trxnLogIndex)) { - // This transaction is a replay. Send replay response. - throw new OMReplayException(); - } - // Append new block openKeyInfo.appendNewBlocks(Collections.singletonList( OmKeyLocationInfo.getFromProtobuf(blockLocation)), false); @@ -229,35 +214,23 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setKeyLocation(blockLocation).build()); omClientResponse = new OMAllocateBlockResponse(omResponse.build(), openKeyInfo, clientID); - result = Result.SUCCESS; LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}", volumeName, bucketName, openKeyName); } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMAllocateBlockResponse(createReplayOMResponse( - omResponse)); - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - allocateBlockRequest); - } else { - result = Result.FAILURE; - omMetrics.incNumBlockAllocateCallFails(); - exception = ex; - omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse( - omResponse, exception)); - LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " + + omMetrics.incNumBlockAllocateCallFails(); + exception = ex; + omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse( + omResponse, exception)); + LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " + "Exception:{}", volumeName, bucketName, openKeyName, exception); - } } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); } - if (result != Result.REPLAY) { - auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap, - exception, getOmRequest().getUserInfo())); - } + auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap, + exception, getOmRequest().getUserInfo())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 7ee7db51772d..dccb93bb9da3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -71,13 +70,6 @@ public class OMKeyCommitRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(OMKeyCommitRequest.class); - private enum Result { - SUCCESS, - REPLAY, - DELETE_OPEN_KEY_ONLY, - FAILURE - } - public OMKeyCommitRequest(OMRequest omRequest) { super(omRequest); } @@ -99,7 +91,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } KeyArgs.Builder newKeyArgs = - keyArgs.toBuilder().setModificationTime(Time.now()); + keyArgs.toBuilder().setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName())); return 
getOmRequest().toBuilder() .setCommitKeyRequest(commitKeyRequest.toBuilder() @@ -133,58 +127,37 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo omKeyInfo = null; OMClientResponse omClientResponse = null; boolean bucketLockAcquired = false; - Result result = null; + Result result; OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, commitKeyRequest.getClientID()); try { + commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap); + volumeName = commitKeyArgs.getVolumeName(); + bucketName = commitKeyArgs.getBucketName(); + // check Acl checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, commitKeyRequest.getClientID()); + String dbOzoneKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, + keyName, commitKeyRequest.getClientID()); + List locationInfoList = new ArrayList<>(); for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) { locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation)); } - bucketLockAcquired = omMetadataManager.getLock().acquireLock(BUCKET_LOCK, - volumeName, bucketName); + bucketLockAcquired = + omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - // Revisit this logic to see how we can skip this check when ratis is - // enabled. - if (ozoneManager.isRatisEnabled()) { - // Check if OzoneKey already exists in DB - OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable() - .getIfExist(dbOzoneKey); - if (dbKeyInfo != null) { - // Check if this transaction is a replay of ratis logs - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - // During KeyCreate, we do not check the OpenKey Table for replay. - // This is so as to avoid an extra DB read during KeyCreate. - // If KeyCommit is a replay, the KeyCreate request could also have - // been replayed. And since we do not check for replay in KeyCreate, - // we should scrub the key from OpenKey table now, is it exists. 
- - omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey); - if (omKeyInfo != null) { - omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(dbOpenKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); - - throw new OMReplayException(true); - } - throw new OMReplayException(); - } - } - } - omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey); if (omKeyInfo == null) { throw new OMException("Failed to commit key, as " + dbOpenKey + @@ -214,22 +187,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - if (((OMReplayException) ex).isDBOperationNeeded()) { - result = Result.DELETE_OPEN_KEY_ONLY; - omClientResponse = new OMKeyCommitResponse(omResponse.build(), - dbOpenKey); - } else { - result = Result.REPLAY; - omClientResponse = new OMKeyCommitResponse(createReplayOMResponse( - omResponse)); - } - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = new OMKeyCommitResponse(createErrorOMResponse( - omResponse, exception)); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = new OMKeyCommitResponse(createErrorOMResponse( + omResponse, exception)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -240,11 +201,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - // Performing audit logging outside of the lock. - if (result != Result.REPLAY && result != Result.DELETE_OPEN_KEY_ONLY) { - auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, + auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, exception, getOmRequest().getUserInfo())); - } switch (result) { case SUCCESS: @@ -253,21 +211,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // As key also can have multiple versions, we need to increment keys // only if version is 0. Currently we have not complete support of // versioning of keys. So, this can be revisited later. - if (omKeyInfo.getKeyLocationVersions().size() == 1) { omMetrics.incNumKeys(); } - LOG.debug("Key commited. Volume:{}, Bucket:{}, Key:{}", volumeName, + LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - commitKeyRequest); - break; - case DELETE_OPEN_KEY_ONLY: - LOG.debug("Replayed Transaction {}. Deleting old key {} from OpenKey " + - "table. Request: {}", trxnLogIndex, dbOpenKey, commitKeyRequest); - break; case FAILURE: LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}. 
Exception:{}", volumeName, bucketName, keyName, exception); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 3f4266f635be..1b712fb2921b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.key; import java.io.IOException; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -27,7 +28,11 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,7 +45,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -64,7 +68,10 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.hdds.utils.UniqueId; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; /** * Handles CreateKey request. @@ -92,6 +99,20 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { if(checkKeyNameEnabled){ OmUtils.validateKeyName(keyArgs.getKeyName()); } + + String keyPath = keyArgs.getKeyName(); + if (ozoneManager.getEnableFileSystemPaths()) { + // If enabled, disallow keys with trailing /. As in fs semantics + // directories end with trailing /. + keyPath = validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyPath); + if (keyPath.endsWith("/")) { + throw new OMException("Invalid KeyPath, key names with trailing / " + + "are not allowed." + keyPath, + OMException.ResultCodes.INVALID_KEY_NAME); + } + } + // We cannot allocate block for multipart upload part when // createMultipartKey is called, as we will not know type and factor with // which initiateMultipartUpload has started for this key. When @@ -132,7 +153,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { // As for a client for the first time this can be executed on any OM, // till leader is identified. 
- List< OmKeyLocationInfo > omKeyLocationInfoList = + List omKeyLocationInfoList = allocateBlock(ozoneManager.getScmClient(), ozoneManager.getBlockTokenSecretManager(), type, factor, new ExcludeList(), requestedSize, scmBlockSize, @@ -150,7 +171,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { newKeyArgs = keyArgs.toBuilder().setModificationTime(Time.now()); } + newKeyArgs.setKeyName(keyPath); + generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager); + newCreateKeyRequest = createKeyRequest.toBuilder().setKeyArgs(newKeyArgs) .setClientID(UniqueId.next()); @@ -161,11 +185,13 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override + @SuppressWarnings("methodlength") public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); KeyArgs keyArgs = createKeyRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -184,7 +210,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest()); IOException exception = null; Result result = null; + List missingParentInfos = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); @@ -201,28 +232,49 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, keyName); OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().getIfExist(dbKeyName); + if (dbKeyInfo != null) { - // Check if this transaction is a replay of ratis logs. - // We check only the KeyTable here and not the OpenKeyTable. In case - // this transaction is a replay but the transaction was not committed - // to the KeyTable, then we recreate the key in OpenKey table. This is - // okay as all the subsequent transactions would also be replayed and - // the openKey table would eventually reach the same state. - // The reason we do not check the OpenKey table is to avoid a DB read - // in regular non-replay scenario. - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - throw new OMReplayException(); - } + ozoneManager.getKeyManager().refresh(dbKeyInfo); } OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)); + // If FILE_EXISTS we just override like how we used to do for Key Create. + List< OzoneAcl > inheritAcls; + if (ozoneManager.getEnableFileSystemPaths()) { + OMFileRequest.OMPathInfo pathInfo = + OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName, + bucketName, keyName, Paths.get(keyName)); + OMFileRequest.OMDirectoryResult omDirectoryResult = + pathInfo.getDirectoryResult(); + inheritAcls = pathInfo.getAcls(); + + // Check if a file or directory exists with same key name. + if (omDirectoryResult == DIRECTORY_EXISTS) { + throw new OMException("Cannot write to " + + "directory. 
createIntermediateDirs behavior is enabled and " + + "hence / has special interpretation: " + keyName, NOT_A_FILE); + } else + if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) { + throw new OMException("Can not create file: " + keyName + + " as there is already file in the given path", NOT_A_FILE); + } + + missingParentInfos = OMDirectoryCreateRequest + .getAllParentInfo(ozoneManager, keyArgs, + pathInfo.getMissingParents(), inheritAcls, trxnLogIndex); + + // Add cache entries for the prefix directories. + // Skip adding for the file key itself, until Key Commit. + OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName, + bucketName, Optional.absent(), Optional.of(missingParentInfos), + trxnLogIndex); + + } + omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs, dbKeyInfo, - keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), + keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -250,22 +302,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setOpenVersion(openVersion).build()) .setCmdType(Type.CreateKey); omClientResponse = new OMKeyCreateResponse(omResponse.build(), - omKeyInfo, null, clientID); + omKeyInfo, missingParentInfos, clientID); result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMKeyCreateResponse(createReplayOMResponse( - omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - omMetrics.incNumKeyAllocateFails(); - omResponse.setCmdType(Type.CreateKey); - omClientResponse = new OMKeyCreateResponse(createErrorOMResponse( - omResponse, exception)); - } + result = Result.FAILURE; + exception = ex; + omMetrics.incNumKeyAllocateFails(); + omResponse.setCmdType(Type.CreateKey); + omClientResponse = new OMKeyCreateResponse( + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -276,25 +322,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // Audit Log outside the lock - if (result != Result.REPLAY) { - Map auditMap = buildKeyArgsAuditMap(keyArgs); - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.ALLOCATE_KEY, auditMap, exception, - getOmRequest().getUserInfo())); - } + auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.ALLOCATE_KEY, auditMap, exception, + getOmRequest().getUserInfo())); switch (result) { case SUCCESS: LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - createKeyRequest); - break; case FAILURE: LOG.error("Key creation failed. Volume:{}, Bucket:{}, Key{}. 
" + - "Exception:{}", volumeName, bucketName, keyName, exception); + "Exception:{}", volumeName, bucketName, keyName, exception); break; default: LOG.error("Unrecognized Result for OMKeyCreateRequest: {}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 167330a302f1..4d8562c206d6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse; @@ -76,7 +75,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs(); OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs = - keyArgs.toBuilder().setModificationTime(Time.now()); + keyArgs.toBuilder().setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName())); return getOmRequest().toBuilder() .setDeleteKeyRequest(deleteKeyRequest.toBuilder() @@ -89,12 +90,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); - OzoneManagerProtocolProtos.KeyArgs deleteKeyArgs = + OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); - String volumeName = deleteKeyArgs.getVolumeName(); - String bucketName = deleteKeyArgs.getBucketName(); - String keyName = deleteKeyArgs.getKeyName(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumKeyDeletes(); @@ -102,8 +104,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, AuditLogger auditLogger = ozoneManager.getAuditLogger(); OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - Map auditMap = buildKeyArgsAuditMap(deleteKeyArgs); - OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); @@ -112,6 +112,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMClientResponse omClientResponse = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acl checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); @@ -130,14 +134,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new OMException("Key not found", KEY_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. 
- if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - throw new OMReplayException(); - } - // Set the UpdateID to current transactionLogIndex omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -158,16 +154,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMKeyDeleteResponse(createReplayOMResponse( - omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = new OMKeyDeleteResponse(createErrorOMResponse( - omResponse, exception)); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = new OMKeyDeleteResponse( + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -178,10 +168,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // Performing audit logging outside of the lock. - if (result != Result.REPLAY) { - auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap, - exception, userInfo)); - } + auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap, + exception, userInfo)); + switch (result) { case SUCCESS: @@ -189,10 +178,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - deleteKeyRequest); - break; case FAILURE: omMetrics.incNumKeyDeleteFails(); LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index f7783dbe42c6..ce7f1e98f9c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -18,12 +18,8 @@ package org.apache.hadoop.ozone.om.request.key; -import java.io.IOException; import java.util.ArrayList; -import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -37,8 +33,6 @@ import java.util.List; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - /** * Handles purging of keys from OM DB. 
*/ @@ -54,9 +48,6 @@ public OMKeyPurgeRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest(); List bucketDeletedKeysList = purgeKeysRequest .getDeletedKeysList(); @@ -65,97 +56,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); OMClientResponse omClientResponse = null; - boolean success = true; - IOException exception = null; - // Filter the keys that have updateID > transactionLogIndex. This is done so - // that in case this transaction is a replay, we do not purge keys - // created after the original purge request. - // PurgeKeys request has keys belonging to same bucket grouped together. - // We get each bucket lock and check the above condition. - for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { - boolean acquiredLock = false; - String volumeName = bucketWithDeleteKeys.getVolumeName(); - String bucketName = bucketWithDeleteKeys.getBucketName(); - ArrayList keysNotPurged = new ArrayList<>(); - Result result = null; - try { - acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - for (String deletedKey : bucketWithDeleteKeys.getKeysList()) { - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(deletedKey); - boolean purgeKey = true; - if (repeatedOmKeyInfo != null) { - for (OmKeyInfo omKeyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) { - // Discard those keys whose updateID is > transactionLogIndex. - // This could happen when the PurgeRequest is replayed. - if (isReplay(ozoneManager, omKeyInfo, - trxnLogIndex)) { - purgeKey = false; - result = Result.REPLAY; - break; - } - // TODO: If a deletedKey has any one OmKeyInfo which was - // deleted after the original PurgeRequest (updateID > - // trxnLogIndex), we avoid purging that whole key in the - // replay request. Instead of discarding the whole key, we can - // identify the OmKeyInfo's which have updateID < - // trxnLogIndex and purge only those OMKeyInfo's from the - // deletedKey in DeletedTable. - } - if (purgeKey) { - keysToBePurgedList.add(deletedKey); - } else { - keysNotPurged.add(deletedKey); - } - } - } - } catch (IOException ex) { - success = false; - exception = ex; - break; - } finally { - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - if (result == Result.REPLAY) { - LOG.debug("Replayed Transaction {}. 
Request: {}", trxnLogIndex, - purgeKeysRequest); - if (!keysNotPurged.isEmpty()) { - StringBuilder notPurgeList = new StringBuilder(); - for (String key : keysNotPurged) { - notPurgeList.append(", ").append(key); - } - LOG.debug("Following keys from Volume:{}, Bucket:{} will not be" + - " purged: {}", notPurgeList.toString().substring(2)); - } + for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { + for (String deletedKey : bucketWithDeleteKeys.getKeysList()) { + keysToBePurgedList.add(deletedKey); } } - if (success) { - if (LOG.isDebugEnabled()) { - if (keysToBePurgedList.isEmpty()) { - LOG.debug("No keys will be purged as part of KeyPurgeRequest: {}", - purgeKeysRequest); - } else { - LOG.debug("Following keys will be purged as part of " + - "KeyPurgeRequest: {} - {}", purgeKeysRequest, - String.join(",", keysToBePurgedList)); - } - } - omClientResponse = new OMKeyPurgeResponse(omResponse.build(), + omClientResponse = new OMKeyPurgeResponse(omResponse.build(), keysToBePurgedList); - } else { - omClientResponse = new OMKeyPurgeResponse(createErrorOMResponse( - omResponse, exception)); - } - addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); + return omClientResponse; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index f0069a168161..4e7c05c0978e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -71,16 +71,6 @@ public OMKeyRenameRequest(OMRequest omRequest) { super(omRequest); } - /** - * Stores the result of request execution for Rename Requests. - */ - private enum Result { - SUCCESS, - DELETE_FROM_KEY_ONLY, - REPLAY, - FAILURE, - } - @Override public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @@ -95,13 +85,16 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { OmUtils.validateKeyName(renameKeyRequest.getToKeyName()); } + KeyArgs renameKeyArgs = renameKeyRequest.getKeyArgs(); + // Set modification time. 
- KeyArgs.Builder newKeyArgs = renameKeyRequest.getKeyArgs().toBuilder() + KeyArgs.Builder newKeyArgs = renameKeyArgs.toBuilder() .setModificationTime(Time.now()); return getOmRequest().toBuilder() .setRenameKeyRequest(renameKeyRequest.toBuilder() - .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build(); + .setKeyArgs(newKeyArgs)) + .setUserInfo(getUserInfo()).build(); } @@ -111,12 +104,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest(); - OzoneManagerProtocolProtos.KeyArgs renameKeyArgs = + OzoneManagerProtocolProtos.KeyArgs keyArgs = renameKeyRequest.getKeyArgs(); + Map auditMap = buildAuditMap(keyArgs, renameKeyRequest); - String volumeName = renameKeyArgs.getVolumeName(); - String bucketName = renameKeyArgs.getBucketName(); - String fromKeyName = renameKeyArgs.getKeyName(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String fromKeyName = keyArgs.getKeyName(); String toKeyName = renameKeyRequest.getToKeyName(); OMMetrics omMetrics = ozoneManager.getMetrics(); @@ -124,9 +118,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, AuditLogger auditLogger = ozoneManager.getAuditLogger(); - Map auditMap = - buildAuditMap(renameKeyArgs, renameKeyRequest); - OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); @@ -142,6 +133,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new OMException("Key name is empty", OMException.ResultCodes.INVALID_KEY_NAME); } + + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // check Acls to see if user has access to perform delete operation on // old key and create operation on new key checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName, @@ -162,87 +158,40 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo toKeyValue = omMetadataManager.getKeyTable().get(toKey); if (toKeyValue != null) { - - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, toKeyValue, trxnLogIndex)) { - - // Check if fromKey is still in the DB and created before this - // replay. - // For example, lets say we have the following sequence of - // transactions. - // Trxn 1 : Create Key1 - // Trnx 2 : Rename Key1 to Key2 -> Deletes Key1 and Creates Key2 - // Now if these transactions are replayed: - // Replay Trxn 1 : Creates Key1 again as Key1 does not exist in DB - // Replay Trxn 2 : Key2 is not created as it exists in DB and the - // request would be deemed a replay. But Key1 - // is still in the DB and needs to be deleted. - fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); - if (fromKeyValue != null) { - // Check if this replay transaction was after the fromKey was - // created. If so, we have to delete the fromKey. - if (ozoneManager.isRatisEnabled() && - trxnLogIndex > fromKeyValue.getUpdateID()) { - // Add to cache. Only fromKey should be deleted. ToKey already - // exists in DB as this transaction is a replay. 
- result = Result.DELETE_FROM_KEY_ONLY; - Table keyTable = omMetadataManager - .getKeyTable(); - keyTable.addCacheEntry(new CacheKey<>(fromKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); - - omClientResponse = new OMKeyRenameResponse(omResponse - .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(), - fromKeyName, fromKeyValue); - } - } - - if (result == null) { - result = Result.REPLAY; - // If toKey exists and fromKey does not, then no further action is - // required. Return a dummy OMClientResponse. - omClientResponse = new OMKeyRenameResponse(createReplayOMResponse( - omResponse)); - } - } else { - // This transaction is not a replay. toKeyName should not exist - throw new OMException("Key already exists " + toKeyName, + throw new OMException("Key already exists " + toKeyName, OMException.ResultCodes.KEY_ALREADY_EXISTS); - } - } else { - - // This transaction is not a replay. + } - // fromKeyName should exist - fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); - if (fromKeyValue == null) { + // fromKeyName should exist + fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); + if (fromKeyValue == null) { // TODO: Add support for renaming open key - throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND); - } + throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND); + } - fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); - fromKeyValue.setKeyName(toKeyName); - //Set modification time - fromKeyValue.setModificationTime(renameKeyArgs.getModificationTime()); + fromKeyValue.setKeyName(toKeyName); - // Add to cache. - // fromKey should be deleted, toKey should be added with newly updated - // omKeyInfo. - Table keyTable = omMetadataManager.getKeyTable(); + //Set modification time + fromKeyValue.setModificationTime(keyArgs.getModificationTime()); - keyTable.addCacheEntry(new CacheKey<>(fromKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); + // Add to cache. + // fromKey should be deleted, toKey should be added with newly updated + // omKeyInfo. + Table keyTable = omMetadataManager.getKeyTable(); - keyTable.addCacheEntry(new CacheKey<>(toKey), - new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex)); + keyTable.addCacheEntry(new CacheKey<>(fromKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); - omClientResponse = new OMKeyRenameResponse(omResponse - .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(), - fromKeyName, toKeyName, fromKeyValue); + keyTable.addCacheEntry(new CacheKey<>(toKey), + new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex)); - result = Result.SUCCESS; - } + omClientResponse = new OMKeyRenameResponse(omResponse + .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(), + fromKeyName, toKeyName, fromKeyValue); + + result = Result.SUCCESS; } catch (IOException ex) { result = Result.FAILURE; exception = ex; @@ -257,10 +206,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - if (result == Result.SUCCESS || result == Result.FAILURE) { - auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap, - exception, getOmRequest().getUserInfo())); - } + auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap, + exception, getOmRequest().getUserInfo())); switch (result) { case SUCCESS: @@ -268,15 +215,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, " fromKey:{} toKey:{}. 
", volumeName, bucketName, fromKeyName, toKeyName); break; - case DELETE_FROM_KEY_ONLY: - LOG.debug("Replayed transaction {}: {}. Renamed Key {} already exists. " + - "Deleting old key {}.", trxnLogIndex, renameKeyRequest, toKey, - fromKey); - break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - renameKeyRequest); - break; case FAILURE: ozoneManager.getMetrics().incNumKeyRenameFails(); LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} " + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 0aec04dc608b..d863073cd524 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -27,11 +27,13 @@ import java.util.Collections; import java.util.EnumSet; import java.util.List; +import java.util.Map; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.PrefixManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -88,6 +90,15 @@ public OMKeyRequest(OMRequest omRequest) { super(omRequest); } + protected static KeyArgs resolveBucketLink( + OzoneManager ozoneManager, KeyArgs keyArgs, + Map auditMap) throws IOException { + ResolvedBucket bucket = ozoneManager.resolveBucketLink(keyArgs); + keyArgs = bucket.update(keyArgs); + bucket.audit(auditMap); + return keyArgs; + } + /** * This methods avoids multiple rpc calls to SCM by allocating multiple blocks * in one rpc call. @@ -470,38 +481,40 @@ protected void generateRequiredEncryptionInfo(KeyArgs keyArgs, // If KMS is not enabled, follow the normal approach of execution of not // reading DB in pre-execute. + + OmBucketInfo bucketInfo = null; if (ozoneManager.getKmsProvider() != null) { try { acquireLock = omMetadataManager.getLock().acquireReadLock( BUCKET_LOCK, volumeName, bucketName); - - OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( + bucketInfo = omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)); - - // Don't throw exception of bucket not found when bucketinfo is not - // null. If bucketinfo is null, later when request - // is submitted and if bucket does not really exist it will fail in - // applyTransaction step. Why we are doing this is if OM thinks it is - // the leader, but it is not, we don't want to fail request in this - // case. As anyway when it submits request to ratis it will fail with - // not leader exception, and client will retry on correct leader and - // request will be executed. - if (bucketInfo != null) { - Optional< FileEncryptionInfo > encryptionInfo = - getFileEncryptionInfo(ozoneManager, bucketInfo); - if (encryptionInfo.isPresent()) { - newKeyArgs.setFileEncryptionInfo( - OMPBHelper.convert(encryptionInfo.get())); - } - } } finally { if (acquireLock) { omMetadataManager.getLock().releaseReadLock( BUCKET_LOCK, volumeName, bucketName); } } + + // Don't throw exception of bucket not found when bucketinfo is + // null. 
If bucketinfo is null, later when request + // is submitted and if bucket does not really exist it will fail in + // applyTransaction step. Why we are doing this is if OM thinks it is + // the leader, but it is not, we don't want to fail request in this + // case. As anyway when it submits request to ratis it will fail with + // not leader exception, and client will retry on correct leader and + // request will be executed. + + if (bucketInfo != null) { + Optional encryptionInfo = + getFileEncryptionInfo(ozoneManager, bucketInfo); + if (encryptionInfo.isPresent()) { + newKeyArgs.setFileEncryptionInfo( + OMPBHelper.convert(encryptionInfo.get())); + } + } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index b5e8dc8cbc41..012df4960e42 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -18,45 +18,44 @@ package org.apache.hadoop.ozone.om.request.key; -import com.google.common.base.Preconditions; +import com.google.common.base.Optional; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse; import org.apache.hadoop.ozone.om.response.key.OMKeysDeleteResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeysRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; -import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import 
java.util.Set; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.OzoneConsts.BUCKET; +import static org.apache.hadoop.ozone.OzoneConsts.DELETED_KEYS_LIST; +import static org.apache.hadoop.ozone.OzoneConsts.UNDELETED_KEYS_LIST; +import static org.apache.hadoop.ozone.OzoneConsts.VOLUME; +import static org.apache.hadoop.ozone.audit.OMAction.DELETE_KEYS; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; /** * Handles DeleteKey request. @@ -70,24 +69,6 @@ public OMKeysDeleteRequest(OMRequest omRequest) { super(omRequest); } - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - DeleteKeysRequest deleteKeyRequest = - getOmRequest().getDeleteKeysRequest(); - Preconditions.checkNotNull(deleteKeyRequest); - List newKeyArgsList = new ArrayList<>(); - for (KeyArgs keyArgs : deleteKeyRequest.getKeyArgsList()) { - newKeyArgsList.add( - keyArgs.toBuilder().setModificationTime(Time.now()).build()); - } - DeleteKeysRequest newDeleteKeyRequest = DeleteKeysRequest - .newBuilder().addAllKeyArgs(newKeyArgsList).build(); - - return getOmRequest().toBuilder() - .setDeleteKeysRequest(newDeleteKeyRequest) - .setUserInfo(getUserInfo()).build(); - } - @Override @SuppressWarnings("methodlength") public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, @@ -95,18 +76,22 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, DeleteKeysRequest deleteKeyRequest = getOmRequest().getDeleteKeysRequest(); - List deleteKeyArgsList = deleteKeyRequest.getKeyArgsList(); - Set unDeletedKeys = new HashSet<>(); + OzoneManagerProtocolProtos.DeleteKeyArgs deleteKeyArgs = + deleteKeyRequest.getDeleteKeys(); + + List deleteKeys = new ArrayList<>(deleteKeyArgs.getKeysList()); + IOException exception = null; OMClientResponse omClientResponse = null; Result result = null; OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumKeyDeletes(); - Map auditMap = null; - String volumeName = ""; - String bucketName = ""; - String keyName = ""; + String volumeName = deleteKeyArgs.getVolumeName(); + String bucketName = deleteKeyArgs.getBucketName(); + Map auditMap = new LinkedHashMap<>(); + auditMap.put(VOLUME, volumeName); + auditMap.put(BUCKET, bucketName); List omKeyInfoList = new ArrayList<>(); AuditLogger auditLogger = ozoneManager.getAuditLogger(); @@ -116,99 +101,139 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - try { - for (KeyArgs deleteKeyArgs : deleteKeyArgsList) { - volumeName = deleteKeyArgs.getVolumeName(); - bucketName = deleteKeyArgs.getBucketName(); - keyName = deleteKeyArgs.getKeyName(); - String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey); - omKeyInfoList.add(omKeyInfo); - unDeletedKeys.add(omKeyInfo); - } - - // Check if any of the key in the batch cannot be deleted. If exists the - // batch will delete failed. 
- for (KeyArgs deleteKeyArgs : deleteKeyArgsList) { - volumeName = deleteKeyArgs.getVolumeName(); - bucketName = deleteKeyArgs.getBucketName(); - keyName = deleteKeyArgs.getKeyName(); - auditMap = buildKeyArgsAuditMap(deleteKeyArgs); - // check Acl - checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, - IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); - String objectKey = omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName); + boolean acquiredLock = false; - // Validate bucket and volume exists or not. - validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + int indexFailed = 0; + int length = deleteKeys.size(); + OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys = + OzoneManagerProtocolProtos.DeleteKeyArgs.newBuilder() + .setVolumeName(volumeName).setBucketName(bucketName); + boolean deleteStatus = true; + try { + ResolvedBucket bucket = ozoneManager.resolveBucketLink( + Pair.of(volumeName, bucketName)); + bucket.audit(auditMap); + volumeName = bucket.realVolume(); + bucketName = bucket.realBucket(); + + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + // Validate bucket and volume exists or not. + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + for (indexFailed = 0; indexFailed < length; indexFailed++) { + String keyName = deleteKeyArgs.getKeys(indexFailed); + String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey); if (omKeyInfo == null) { - throw new OMException("Key not found: " + keyName, KEY_NOT_FOUND); + deleteStatus = false; + LOG.error("Received a request to delete a Key does not exist {}", + objectKey); + deleteKeys.remove(keyName); + unDeletedKeys.addKeys(keyName); + continue; } - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - throw new OMReplayException(); + try { + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); + omKeyInfoList.add(omKeyInfo); + } catch (Exception ex) { + deleteStatus = false; + LOG.error("Acl check failed for Key: {}", objectKey, ex); + deleteKeys.remove(keyName); + unDeletedKeys.addKeys(keyName); } } + // Mark all keys which can be deleted, in cache as deleted. + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + omMetadataManager.getKeyTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, + omKeyInfo.getKeyName())), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + } + omClientResponse = new OMKeysDeleteResponse(omResponse - .setDeleteKeysResponse(DeleteKeysResponse.newBuilder()).build(), - omKeyInfoList, trxnLogIndex, ozoneManager.isRatisEnabled()); + .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() + .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)) + .setStatus(deleteStatus ? 
OK : PARTIAL_DELETE) + .setSuccess(deleteStatus).build(), + omKeyInfoList, trxnLogIndex, + ozoneManager.isRatisEnabled()); + result = Result.SUCCESS; + } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new OMKeyDeleteResponse(createReplayOMResponse( - omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - - omClientResponse = new OMKeyDeleteResponse( - createOperationKeysErrorOMResponse(omResponse, exception, - unDeletedKeys)); + result = Result.FAILURE; + exception = ex; + createErrorOMResponse(omResponse, ex); + + // reset deleteKeys as request failed. + deleteKeys = new ArrayList<>(); + // Add all keys which are failed due to any other exception . + for (int i = indexFailed; i < length; i++) { + unDeletedKeys.addKeys(deleteKeyArgs.getKeys(i)); } + omResponse.setDeleteKeysResponse(DeleteKeysResponse.newBuilder() + .setStatus(false).setUnDeletedKeys(unDeletedKeys).build()).build(); + omClientResponse = new OMKeysDeleteResponse(omResponse.build()); + } finally { + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); } - // Performing audit logging outside of the lock. - if (result != Result.REPLAY) { - auditLog(auditLogger, buildAuditMessage( - OMAction.DELETE_KEY, auditMap, exception, userInfo)); - } + addDeletedKeys(auditMap, deleteKeys, unDeletedKeys.getKeysList()); + + auditLog(auditLogger, buildAuditMessage(DELETE_KEYS, auditMap, exception, + userInfo)); + switch (result) { case SUCCESS: - omMetrics.decNumKeys(); - LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName, - bucketName, keyName); - break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", - trxnLogIndex, deleteKeyRequest); + omMetrics.decNumKeys(deleteKeys.size()); + if (LOG.isDebugEnabled()) { + LOG.debug("Keys delete success. Volume:{}, Bucket:{}, Keys:{}", + volumeName, bucketName, auditMap.get(DELETED_KEYS_LIST)); + } break; case FAILURE: + omMetrics.decNumKeys(deleteKeys.size()); omMetrics.incNumKeyDeleteFails(); - LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key{}." + - " Exception:{}", volumeName, bucketName, keyName, exception); + if (LOG.isDebugEnabled()) { + LOG.debug("Keys delete failed. Volume:{}, Bucket:{}, DeletedKeys:{}, " + + "UnDeletedKeys:{}", volumeName, bucketName, + auditMap.get(DELETED_KEYS_LIST), auditMap.get(UNDELETED_KEYS_LIST), + exception); + } break; default: - LOG.error("Unrecognized Result for OMKeyDeleteRequest: {}", + LOG.error("Unrecognized Result for OMKeysDeleteRequest: {}", deleteKeyRequest); } return omClientResponse; } + + /** + * Add key info to audit map for DeleteKeys request. 
+ */ + private static void addDeletedKeys( + Map<String, String> auditMap, List<String> deletedKeys, + List<String> unDeletedKeys) { + auditMap.put(DELETED_KEYS_LIST, String.join(",", deletedKeys)); + auditMap.put(UNDELETED_KEYS_LIST, String.join(",", unDeletedKeys)); + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java new file mode 100644 index 000000000000..dbcde6d4ce14 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java @@ -0,0 +1,271 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeysRenameResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_RENAME; +import static org.apache.hadoop.ozone.OzoneConsts.RENAMED_KEYS_MAP; +import static org.apache.hadoop.ozone.OzoneConsts.UNRENAMED_KEYS_MAP; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles rename keys request. 
+ */ +public class OMKeysRenameRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeysRenameRequest.class); + + public OMKeysRenameRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + + RenameKeysRequest renameKeysRequest = getOmRequest().getRenameKeysRequest(); + RenameKeysArgs renameKeysArgs = renameKeysRequest.getRenameKeysArgs(); + String volumeName = renameKeysArgs.getVolumeName(); + String bucketName = renameKeysArgs.getBucketName(); + OMClientResponse omClientResponse = null; + + List unRenamedKeys = new ArrayList<>(); + + // fromKeyName -> toKeyName + Map renamedKeys = new HashMap<>(); + + Map fromKeyAndToKeyInfo = new HashMap<>(); + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumKeyRenames(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + IOException exception = null; + OmKeyInfo fromKeyValue = null; + Result result = null; + Map auditMap = new LinkedHashMap<>(); + String fromKeyName = null; + String toKeyName = null; + boolean acquiredLock = false; + boolean renameStatus = true; + + try { + ResolvedBucket bucket = ozoneManager.resolveBucketLink( + Pair.of(volumeName, bucketName)); + bucket.audit(auditMap); + volumeName = bucket.realVolume(); + bucketName = bucket.realBucket(); + acquiredLock = + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + for (RenameKeysMap renameKey : renameKeysArgs.getRenameKeysMapList()) { + + fromKeyName = renameKey.getFromKeyName(); + toKeyName = renameKey.getToKeyName(); + RenameKeysMap.Builder unRenameKey = RenameKeysMap.newBuilder(); + + if (toKeyName.length() == 0 || fromKeyName.length() == 0) { + renameStatus = false; + unRenamedKeys.add( + unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName) + .build()); + LOG.error("Key name is empty fromKeyName {} toKeyName {}", + fromKeyName, toKeyName); + continue; + } + + try { + // check Acls to see if user has access to perform delete operation + // on old key and create operation on new key + checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName, + IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); + checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName, + IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); + } catch (Exception ex) { + renameStatus = false; + unRenamedKeys.add( + unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName) + .build()); + LOG.error("Acl check failed for fromKeyName {} toKeyName {}", + fromKeyName, toKeyName, ex); + continue; + } + + // Check if toKey exists + String fromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, + fromKeyName); + String toKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName); + OmKeyInfo toKeyValue = omMetadataManager.getKeyTable().get(toKey); + + if (toKeyValue != null) { + + renameStatus = false; + unRenamedKeys.add( + unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName) + .build()); + LOG.error("Received a request name of new key {} already exists", + toKeyName); + } + + // fromKeyName should exist + fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); + if 
(fromKeyValue == null) { + renameStatus = false; + unRenamedKeys.add( + unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName) + .build()); + LOG.error("Received a request to rename a Key does not exist {}", + fromKey); + continue; + } + + fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + fromKeyValue.setKeyName(toKeyName); + + //Set modification time + fromKeyValue.setModificationTime(Time.now()); + + // Add to cache. + // fromKey should be deleted, toKey should be added with newly updated + // omKeyInfo. + Table keyTable = omMetadataManager.getKeyTable(); + keyTable.addCacheEntry(new CacheKey<>(fromKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + keyTable.addCacheEntry(new CacheKey<>(toKey), + new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex)); + renamedKeys.put(fromKeyName, toKeyName); + fromKeyAndToKeyInfo.put(fromKeyName, fromKeyValue); + } + + OmRenameKeys newOmRenameKeys = + new OmRenameKeys(volumeName, bucketName, null, fromKeyAndToKeyInfo); + omClientResponse = new OMKeysRenameResponse(omResponse + .setRenameKeysResponse(RenameKeysResponse.newBuilder() + .setStatus(renameStatus) + .addAllUnRenamedKeys(unRenamedKeys)) + .setStatus(renameStatus ? OK : PARTIAL_RENAME) + .setSuccess(renameStatus).build(), + newOmRenameKeys); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + createErrorOMResponse(omResponse, ex); + + omResponse.setRenameKeysResponse(RenameKeysResponse.newBuilder() + .setStatus(renameStatus).addAllUnRenamedKeys(unRenamedKeys).build()); + omClientResponse = new OMKeysRenameResponse(omResponse.build()); + + } finally { + addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, + omDoubleBufferHelper); + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } + } + + auditMap = buildAuditMap(auditMap, renamedKeys, unRenamedKeys); + auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEYS, auditMap, + exception, getOmRequest().getUserInfo())); + + switch (result) { + case SUCCESS: + LOG.debug("Rename Keys is successfully completed for auditMap:{}.", + auditMap); + break; + case FAILURE: + ozoneManager.getMetrics().incNumKeyRenameFails(); + LOG.error("Rename keys failed for auditMap:{}.", auditMap); + break; + default: + LOG.error("Unrecognized Result for OMKeysRenameRequest: {}", + renameKeysRequest); + } + + return omClientResponse; + } + + /** + * Build audit map for RenameKeys request. 
+ * + * @param auditMap + * @param renamedKeys + * @param unRenameKeys + * @return + */ + private Map buildAuditMap(Map auditMap, + Map renamedKeys, + List unRenameKeys) { + Map unRenameKeysMap = new HashMap<>(); + for (RenameKeysMap renameKeysMap : unRenameKeys) { + unRenameKeysMap.put(renameKeysMap.getFromKeyName(), + renameKeysMap.getToKeyName()); + } + auditMap.put(RENAMED_KEYS_MAP, renamedKeys.toString()); + auditMap.put(UNRENAMED_KEYS_MAP, unRenameKeysMap.toString()); + return auditMap; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java index eac7842f84e2..232a0fb6c0e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java @@ -21,6 +21,8 @@ import java.io.IOException; import com.google.common.base.Preconditions; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.response.key.OMTrashRecoverResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -86,6 +88,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, boolean acquireLock = false; OMClientResponse omClientResponse = null; try { + ResolvedBucket bucket = ozoneManager.resolveBucketLink( + Pair.of(volumeName, destinationBucket)); + volumeName = bucket.realVolume(); + destinationBucket = bucket.realBucket(); + // Check acl for the destination bucket. checkBucketAcls(ozoneManager, volumeName, destinationBucket, keyName, IAccessAuthorizer.ACLType.WRITE); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index 025c25843adc..9fae4988fa68 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.OMClientRequest; @@ -93,14 +92,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, throw new OMException(OMException.ResultCodes.KEY_NOT_FOUND); } - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. 
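Aside (not part of the patch): the new batch requests above, OMKeysDeleteRequest and OMKeysRenameRequest, no longer fail the whole operation on the first bad key. A key that is missing or fails its ACL check is recorded and reported back, the remaining keys proceed, and the overall status degrades to PARTIAL_DELETE / PARTIAL_RENAME. A rough standalone sketch of that bookkeeping, with hypothetical names and no Ozone dependencies:

```java
import java.util.ArrayList;
import java.util.List;

public final class PartialBatchSketch {
  enum Status { OK, PARTIAL }

  public static void main(String[] args) {
    List<String> requested = List.of("key-a", "missing-key", "key-b");
    List<String> applied = new ArrayList<>();  // stands in for omKeyInfoList / renamedKeys
    List<String> failed = new ArrayList<>();   // stands in for unDeletedKeys / unRenamedKeys

    for (String key : requested) {
      if (key.startsWith("missing")) {         // stands in for "not found" or a failed ACL check
        failed.add(key);
        continue;                              // keep going instead of aborting the batch
      }
      applied.add(key);                        // stands in for marking the key in the table cache
    }

    Status status = failed.isEmpty() ? Status.OK : Status.PARTIAL;
    System.out.println(status + " applied=" + applied + " failed=" + failed);
  }
}
```

The failed list is what the response carries back, so a caller can retry only the keys that were not applied.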
- if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) { - throw new OMReplayException(); - } - operationResult = apply(omKeyInfo, trxnLogIndex); omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -112,14 +103,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = onSuccess(omResponse, omKeyInfo, operationResult); result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = onReplay(omResponse); - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = onFailure(omResponse, ex); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = onFailure(omResponse, ex); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -170,10 +156,6 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, return new OMKeyAclResponse(createErrorOMResponse(omResponse, exception)); } - OMClientResponse onReplay(OMResponse.Builder omResponse) { - return new OMKeyAclResponse(createReplayOMResponse(omResponse)); - } - /** * Completion hook for final processing before return without lock. * Usually used for logging without lock and metric update. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java index 444c0df6efd7..3697cb8f98d2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java @@ -87,12 +87,6 @@ void onComplete(Result result, boolean operationResult, } } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Add acl {} to path {} failed!", ozoneAcls, path, exception); break; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java index 18e999d450a2..67b891aa73b9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java @@ -88,12 +88,6 @@ void onComplete(Result result, boolean operationResult, } } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Remove acl {} to path {} failed!", ozoneAcls, path, exception); break; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java index d8dbe773428a..70f7b28bc0b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java @@ -84,12 +84,6 @@ void onComplete(Result result, boolean operationResult, LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, path); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Set acl {} to path {} failed!", ozoneAcls, path, exception); break; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 7cde2c257e9f..e928402643ef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -26,12 +26,10 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.PrefixManagerImpl; import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -86,14 +84,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omPrefixInfo = omMetadataManager.getPrefixTable().get(prefixPath); - // Check if this transaction is a replay of ratis logs. - if (omPrefixInfo != null) { - if (isReplay(ozoneManager, omPrefixInfo, trxnLogIndex)) { - // This is a replayed transaction. Return dummy response. 
- throw new OMReplayException(); - } - } - try { operationResult = apply(prefixManager, omPrefixInfo, trxnLogIndex); } catch (IOException ex) { @@ -129,14 +119,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = onReplay(omResponse); - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = onFailure(omResponse, ex); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = onFailure(omResponse, ex); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -186,15 +171,6 @@ abstract OMClientResponse onSuccess( abstract OMClientResponse onFailure(OMResponse.Builder omResponse, IOException exception); - /** - * Get the OM Client Response on replayed transactions. - * @param omResonse - * @return OMClientResponse - */ - OMClientResponse onReplay(OMResponse.Builder omResonse) { - return new OMPrefixAclResponse(createReplayOMResponse(omResonse)); - } - /** * Completion hook for final processing before return without lock. * Usually used for logging without lock and metric update. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java index bd25e07d3f4a..7160042b0974 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java @@ -106,12 +106,6 @@ void onComplete(boolean operationResult, IOException exception, } } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: omMetrics.incNumBucketUpdateFails(); LOG.error("Add acl {} to path {} failed!", ozoneAcls, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java index 72c199cc9b57..3731ad17dcf4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java @@ -103,12 +103,6 @@ void onComplete(boolean operationResult, IOException exception, } } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: omMetrics.incNumBucketUpdateFails(); LOG.error("Remove acl {} to path {} failed!", ozoneAcls, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java index 122ada18ac3c..44bc43b52120 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java @@ -99,12 +99,6 @@ void onComplete(boolean operationResult, IOException exception, ozoneObj.getPath()); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: omMetrics.incNumBucketUpdateFails(); LOG.error("Set acl {} to path {} failed!", ozoneAcls, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index f51cba8f3478..08063b6b4d9b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; @@ -33,7 +34,7 @@ import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -48,8 +49,10 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Map; import java.util.UUID; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; /** @@ -66,15 +69,17 @@ public S3InitiateMultipartUploadRequest(OMRequest omRequest) { } @Override - public OMRequest preExecute(OzoneManager ozoneManager) { + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { MultipartInfoInitiateRequest multipartInfoInitiateRequest = getOmRequest().getInitiateMultiPartUploadRequest(); Preconditions.checkNotNull(multipartInfoInitiateRequest); - OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs = - 
multipartInfoInitiateRequest.getKeyArgs().toBuilder() + KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs(); + KeyArgs.Builder newKeyArgs = keyArgs.toBuilder() .setMultipartUploadID(UUID.randomUUID().toString() + "-" + - UniqueId.next()).setModificationTime(Time.now()); + UniqueId.next()).setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName())); return getOmRequest().toBuilder() .setUserInfo(getUserInfo()) @@ -91,13 +96,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, MultipartInfoInitiateRequest multipartInfoInitiateRequest = getOmRequest().getInitiateMultiPartUploadRequest(); - OzoneManagerProtocolProtos.KeyArgs keyArgs = + KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs(); Preconditions.checkNotNull(keyArgs.getMultipartUploadID()); + Map auditMap = buildKeyArgsAuditMap(keyArgs); + String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); + final String requestedVolume = volumeName; + final String requestedBucket = bucketName; String keyName = keyArgs.getKeyName(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); @@ -114,17 +123,27 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest()); OMClientResponse omClientResponse = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // TODO to support S3 ACL later. acquiredBucketLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - // We do not check if this transaction is a replay here to avoid extra - // DB reads. Even if this transaction is replayed, in - // S3MultipartUploadComplete request, we would delete this entry from - // the openKeyTable. Hence, it is safe to replay this transaction here. + // If KMS is configured and TDE is enabled on bucket, throw MPU not + // supported. + if (ozoneManager.getKmsProvider() != null) { + if (omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName)) + .getEncryptionKeyInfo() != null) { + throw new OMException("MultipartUpload is not yet supported on " + + "encrypted buckets", NOT_SUPPORTED_OPERATION); + } + } // We are adding uploadId to key, because if multiple users try to // perform multipart upload on the same key, each will try to upload, who @@ -141,8 +160,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // multipart upload request is received, it returns multipart upload id // for the key. - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, keyArgs.getMultipartUploadID()); + String multipartKey = omMetadataManager.getMultipartKey( + volumeName, bucketName, keyName, + keyArgs.getMultipartUploadID()); // Even if this key already exists in the KeyTable, it would be taken // care of in the final complete multipart upload. 
AWS S3 behavior is @@ -159,8 +179,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .build(); omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) + .setVolumeName(volumeName) + .setBucketName(bucketName) .setKeyName(keyArgs.getKeyName()) .setCreationTime(keyArgs.getModificationTime()) .setModificationTime(keyArgs.getModificationTime()) @@ -185,8 +205,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new S3InitiateMultipartUploadResponse( omResponse.setInitiateMultiPartUploadResponse( MultipartInfoInitiateResponse.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) + .setVolumeName(requestedVolume) + .setBucketName(requestedBucket) .setKeyName(keyName) .setMultipartUploadID(keyArgs.getMultipartUploadID())) .build(), multipartKeyInfo, omKeyInfo); @@ -201,14 +221,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper); if (acquiredBucketLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, + volumeName, bucketName); } } // audit log auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.INITIATE_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), + OMAction.INITIATE_MULTIPART_UPLOAD, auditMap, exception, getOmRequest().getUserInfo())); switch (result) { @@ -222,6 +242,7 @@ OMAction.INITIATE_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), LOG.error("S3 InitiateMultipart Upload request for Key {} in " + "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName, exception); + break; default: LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}", multipartInfoInitiateRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 8c8e0103fbf4..c0ef8b378261 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import java.io.IOException; +import java.util.Map; import com.google.common.base.Optional; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; @@ -72,7 +73,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { return getOmRequest().toBuilder().setAbortMultiPartUploadRequest( getOmRequest().getAbortMultiPartUploadRequest().toBuilder() - .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now()))) + .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), + keyArgs.getKeyName())))) .setUserInfo(getUserInfo()).build(); } @@ -85,9 +89,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .getAbortMultiPartUploadRequest(); OzoneManagerProtocolProtos.KeyArgs keyArgs = multipartUploadAbortRequest .getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = 
keyArgs.getBucketName(); + final String requestedVolume = volumeName; + final String requestedBucket = bucketName; String keyName = keyArgs.getKeyName(); ozoneManager.getMetrics().incNumAbortMultipartUploads(); @@ -101,15 +108,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMClientResponse omClientResponse = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // TODO to support S3 ACL later. acquiredLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, keyArgs.getMultipartUploadID()); + multipartKey = omMetadataManager.getMultipartKey( + volumeName, bucketName, keyName, keyArgs.getMultipartUploadID()); OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(multipartKey); @@ -118,30 +129,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // upload initiated for this key. if (omKeyInfo == null) { throw new OMException("Abort Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, + requestedVolume + "bucket: " + requestedBucket + "key: " + keyName, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } - // We do not check if this transaction is a replay. If OmKeyInfo - // exists, then we should delete it from OpenKeyTable irrespective of - // whether this transaction is a replay. There are 3 scenarios: - // Trxn 1 : Initiate Multipart Upload request for key1 - // (openKey = openKey1) - // Trxn 2 : Abort Multipart Upload request for opneKey1 - // - // Scenario 1 : This is not a replay transaction. - // omKeyInfo is not null and we proceed with the abort request to - // deleted openKey1 from openKeyTable. - // Scenario 2 : Trxn 1 and 2 are replayed. - // Replay of Trxn 1 would create openKey1 in openKeyTable as we do - // not check for replay in S3InitiateMultipartUploadRequest. - // Hence, we should replay Trxn 2 also to maintain consistency. - // Scenario 3 : Trxn 2 is replayed and not Trxn 1. - // This will result in omKeyInfo == null as openKey1 would already - // have been deleted from openKeyTable. - // So in both scenarios 1 and 2 (omKeyInfo not null), we should go - // ahead with this request irrespective of whether it is a replay or not. 
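Aside (not part of the patch): the comment removed just above argued that abort needs no replay guard because it only deletes the open multipart entry, and applying it a second time simply finds nothing to delete. A minimal standalone sketch of that reasoning, using a HashMap in place of the open-key table and a hypothetical key layout:

```java
import java.util.HashMap;
import java.util.Map;

public final class AbortIdempotencySketch {
  public static void main(String[] args) {
    Map<String, String> openKeyTable = new HashMap<>();
    String multipartKey = "/vol/bucket/key/upload-1";  // hypothetical multipart key
    openKeyTable.put(multipartKey, "pending upload info");

    // First application of the abort: the entry exists and is removed.
    System.out.println("first abort removed entry: "
        + (openKeyTable.remove(multipartKey) != null));

    // Applying the same abort again: nothing is left to remove, which the real
    // request surfaces as NO_SUCH_MULTIPART_UPLOAD_ERROR.
    System.out.println("second abort removed entry: "
        + (openKeyTable.remove(multipartKey) != null));
  }
}
```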
- multipartKeyInfo = omMetadataManager.getMultipartInfoTable() .get(multipartKey); multipartKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -172,14 +163,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, + volumeName, bucketName); } } // audit log auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.ABORT_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), + OMAction.ABORT_MULTIPART_UPLOAD, auditMap, exception, getOmRequest().getUserInfo())); switch (result) { @@ -193,6 +184,7 @@ OMAction.ABORT_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), LOG.error("Abort Multipart request is failed for KeyName {} in " + "VolumeName/Bucket {}/{}", keyName, volumeName, bucketName, exception); + break; default: LOG.error("Unrecognized Result for S3MultipartUploadAbortRequest: {}", multipartUploadAbortRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index d9004c0c13a0..1e29d5f3f212 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; @@ -71,14 +70,17 @@ public S3MultipartUploadCommitPartRequest(OMRequest omRequest) { } @Override - public OMRequest preExecute(OzoneManager ozoneManager) { + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = getOmRequest().getCommitMultiPartUploadRequest(); + KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs(); return getOmRequest().toBuilder().setCommitMultiPartUploadRequest( multipartCommitUploadPartRequest.toBuilder() - .setKeyArgs(multipartCommitUploadPartRequest.getKeyArgs() - .toBuilder().setModificationTime(Time.now()))) + .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), + keyArgs.getKeyName())))) .setUserInfo(getUserInfo()).build(); } @@ -90,6 +92,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, getOmRequest().getCommitMultiPartUploadRequest(); KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); @@ -112,6 +115,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmMultipartKeyInfo multipartKeyInfo = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + 
volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + // TODO to support S3 ACL later. acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName); @@ -119,29 +126,23 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); String uploadID = keyArgs.getMultipartUploadID(); - multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); + multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, + keyName, uploadID); multipartKeyInfo = omMetadataManager.getMultipartInfoTable() .get(multipartKey); long clientID = multipartCommitUploadPartRequest.getClientID(); - openKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, - clientID); + openKey = omMetadataManager.getOpenKey( + volumeName, bucketName, keyName, clientID); + + String ozoneKey = omMetadataManager.getOzoneKey( + volumeName, bucketName, keyName); omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); if (omKeyInfo == null) { - // Check the KeyTable if this transaction is a replay of ratis logs. - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, - bucketName, keyName); - OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - if (dbKeyInfo != null) { - if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) { - throw new OMReplayException(); - } - } throw new OMException("Failed to commit Multipart Upload key, as " + openKey + "entry is not found in the openKey table", KEY_NOT_FOUND); @@ -157,8 +158,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // Set the UpdateID to current transactionLogIndex omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); - partName = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName) + clientID; + partName = ozoneKey + clientID; if (multipartKeyInfo == null) { // This can occur when user started uploading part by the time commit @@ -212,41 +212,37 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setPartName(partName)); omClientResponse = new S3MultipartUploadCommitPartResponse( omResponse.build(), multipartKey, openKey, - multipartKeyInfo, oldPartKeyInfo, ozoneManager.isRatisEnabled()); + multipartKeyInfo, oldPartKeyInfo, omKeyInfo, + ozoneManager.isRatisEnabled()); result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = new S3MultipartUploadCommitPartResponse( - createReplayOMResponse(omResponse)); - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = new S3MultipartUploadCommitPartResponse( - createErrorOMResponse(omResponse, exception), openKey, omKeyInfo, - ozoneManager.isRatisEnabled()); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3MultipartUploadCommitPartResponse( + createErrorOMResponse(omResponse, exception), multipartKey, openKey, + multipartKeyInfo, oldPartKeyInfo, omKeyInfo, + ozoneManager.isRatisEnabled()); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, + volumeName, bucketName); } } // audit log + // Add MPU related information. 
+ auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER, + String.valueOf(keyArgs.getMultipartNumber())); + auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NAME, partName); auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, - buildAuditMap(keyArgs, partName), exception, + auditMap, exception, getOmRequest().getUserInfo())); switch (result) { - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", - trxnLogIndex, multipartCommitUploadPartRequest); - break; case SUCCESS: LOG.debug("MultipartUpload Commit is successfully for Key:{} in " + "Volume/Bucket {}/{}", keyName, volumeName, bucketName); @@ -254,7 +250,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, case FAILURE: ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails(); LOG.error("MultipartUpload Commit is failed for Key:{} in " + - "Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception); + "Volume/Bucket {}/{}", keyName, volumeName, bucketName, + exception); break; default: LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " + @@ -264,15 +261,5 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, return omClientResponse; } - private Map buildAuditMap(KeyArgs keyArgs, String partName) { - Map auditMap = buildKeyArgsAuditMap(keyArgs); - - // Add MPU related information. - auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER, - String.valueOf(keyArgs.getMultipartNumber())); - auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NAME, partName); - - return auditMap; - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 17a8c6154b9b..83cc28b01070 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -68,13 +67,6 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class); - private enum Result { - SUCCESS, - REPLAY, - DELETE_OPEN_KEY_ONLY, - FAILURE - } - public S3MultipartUploadCompleteRequest(OMRequest omRequest) { super(omRequest); } @@ -89,7 +81,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { return getOmRequest().toBuilder() .setCompleteMultiPartUploadRequest(multipartUploadCompleteRequest .toBuilder().setKeyArgs(keyArgs.toBuilder() - .setModificationTime(Time.now()))) + .setModificationTime(Time.now()) + .setKeyName(validateAndNormalizeKey( + ozoneManager.getEnableFileSystemPaths(), + keyArgs.getKeyName())))) .setUserInfo(getUserInfo()).build(); } @@ -104,19 +99,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, List partsList = 
multipartUploadCompleteRequest.getPartsListList(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); String volumeName = keyArgs.getVolumeName(); String bucketName = keyArgs.getBucketName(); + final String requestedVolume = volumeName; + final String requestedBucket = bucketName; String keyName = keyArgs.getKeyName(); String uploadID = keyArgs.getMultipartUploadID(); + String multipartKey = null; ozoneManager.getMetrics().incNumCompleteMultipartUploads(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); boolean acquiredLock = false; OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( @@ -125,6 +120,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; Result result = null; try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + multipartKey = omMetadataManager.getMultipartKey(volumeName, + bucketName, keyName, uploadID); + // TODO to support S3 ACL later. acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, @@ -132,42 +134,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - if (omKeyInfo != null) { - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) { - // During S3InitiateMultipartUpload or KeyCreate, we do not check - // the OpenKey Table for replay. This is so as to avoid an extra - // DB read during KeyCreate. - // If this transaction is a replay, the S3InitiateMultipartUpload - // and part key KeyCreate request could also have been replayed. - // And since we do not check for replay there, we should scrub - // the key from OpenKey table and MultipartInfo table now, if it - // exists. 
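Aside (not part of the patch): the S3 multipart hunks in this area all gain a resolveBucketLink(...) call and keep the requestedVolume / requestedBucket pair around, so locking and table work happen against the resolved (real) bucket while the response echoes the names the client sent. A rough standalone sketch of that split, with hypothetical names and data:

```java
import java.util.Map;

public final class BucketLinkSketch {
  public static void main(String[] args) {
    // link "volume/bucket" -> real "volume/bucket" (hypothetical mapping)
    Map<String, String> links = Map.of("s3v/link-bucket", "vol1/real-bucket");

    String requestedVolume = "s3v";
    String requestedBucket = "link-bucket";

    String resolved = links.getOrDefault(requestedVolume + "/" + requestedBucket,
        requestedVolume + "/" + requestedBucket);
    String volumeName = resolved.substring(0, resolved.indexOf('/'));
    String bucketName = resolved.substring(resolved.indexOf('/') + 1);

    // Locks, table lookups and cache updates use the resolved names...
    System.out.println("operate on " + volumeName + "/" + bucketName);
    // ...while the response is built with the names the client asked for.
    System.out.println("respond with " + requestedVolume + "/" + requestedBucket);
  }
}
```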
- - OmKeyInfo openMultipartKeyInfo = omMetadataManager - .getOpenKeyTable().get(multipartKey); - if (openMultipartKeyInfo != null) { - omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); - omMetadataManager.getMultipartInfoTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); - - throw new OMReplayException(true); - } - throw new OMReplayException(false); - } - } + String ozoneKey = omMetadataManager.getOzoneKey( + volumeName, bucketName, keyName); OmMultipartKeyInfo multipartKeyInfo = omMetadataManager .getMultipartInfoTable().get(multipartKey); if (multipartKeyInfo == null) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName), OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } TreeMap partKeyInfoMap = @@ -178,8 +153,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, LOG.error("Complete MultipartUpload failed for key {} , MPU Key has" + " no parts in OM, parts given to upload are {}", ozoneKey, partsList); - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName), OMException.ResultCodes.INVALID_PART); } @@ -195,9 +170,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, "partNumber at index {} is {} for ozonekey is " + "{}", i, currentPartNumber, i - 1, prevPartNumber, ozoneKey); - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName + - "because parts are in Invalid order.", + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName) + + " because parts are in Invalid order.", OMException.ResultCodes.INVALID_PART_ORDER); } prevPartNumber = currentPartNumber; @@ -220,10 +195,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, !partName.equals(partKeyInfo.getPartName())) { String omPartName = partKeyInfo == null ? null : partKeyInfo.getPartName(); - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName + + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName) + ". Provided Part info is { " + partName + ", " + partNumber + - "}, where as OM has partName " + omPartName, + "}, whereas OM has partName " + omPartName, OMException.ResultCodes.INVALID_PART); } @@ -238,9 +213,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(), OzoneConsts.OM_MULTIPART_MIN_SIZE); - throw new OMException("Complete Multipart Upload Failed: " + - "Entity too small: volume: " + volumeName + "bucket: " + - bucketName + "key: " + keyName, + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName) + + ". 
Entity too small.", OMException.ResultCodes.ENTITY_TOO_SMALL); } } @@ -259,6 +234,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, HddsProtos.ReplicationFactor factor = partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor(); + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); if (omKeyInfo == null) { // This is a newly added key, it does not have any versions. OmKeyLocationInfoGroup keyLocationInfoGroup = new @@ -312,8 +288,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omResponse.setCompleteMultiPartUploadResponse( MultipartUploadCompleteResponse.newBuilder() - .setVolume(volumeName) - .setBucket(bucketName) + .setVolume(requestedVolume) + .setBucket(requestedBucket) .setKey(keyName) .setHash(DigestUtils.sha256Hex(keyName))); @@ -322,66 +298,44 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, result = Result.SUCCESS; } else { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName + - "because of empty part list", + throw new OMException( + failureMessage(requestedVolume, requestedBucket, keyName) + + " because of empty part list", OMException.ResultCodes.INVALID_REQUEST); } } catch (IOException ex) { - if (ex instanceof OMReplayException) { - if (((OMReplayException) ex).isDBOperationNeeded()) { - result = Result.DELETE_OPEN_KEY_ONLY; - omClientResponse = new S3MultipartUploadCompleteResponse( - omResponse.build(), multipartKey); - } else { - result = Result.REPLAY; - omClientResponse = new S3MultipartUploadCompleteResponse( - createReplayOMResponse(omResponse)); - } - } else { - result = Result.FAILURE; - exception = ex; - omClientResponse = new S3MultipartUploadCompleteResponse( - createErrorOMResponse(omResponse, exception)); - } + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3MultipartUploadCompleteResponse( + createErrorOMResponse(omResponse, exception)); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, + volumeName, bucketName); } } - if (result != Result.REPLAY && result != Result.DELETE_OPEN_KEY_ONLY) { - Map auditMap = buildKeyArgsAuditMap(keyArgs); - auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString()); + auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString()); - // audit log - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.COMPLETE_MULTIPART_UPLOAD, auditMap, exception, - getOmRequest().getUserInfo())); - } + // audit log + auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.COMPLETE_MULTIPART_UPLOAD, auditMap, exception, + getOmRequest().getUserInfo())); switch (result) { case SUCCESS: - LOG.debug("MultipartUpload Complete request is successfull for Key: {} " + + LOG.debug("MultipartUpload Complete request is successful for Key: {} " + "in Volume/Bucket {}/{}", keyName, volumeName, bucketName); break; - case REPLAY: - LOG.debug("Replayed Transaction {} ignored. Request: {}", - trxnLogIndex, multipartUploadCompleteRequest); - break; - case DELETE_OPEN_KEY_ONLY: - LOG.debug("Replayed Transaction {}. Deleting old key {} from OpenKey " + - "table and MultipartInfo table. 
Request: {}", trxnLogIndex, - multipartKey, multipartUploadCompleteRequest); - break; case FAILURE: ozoneManager.getMetrics().incNumCompleteMultipartUploadFails(); LOG.error("MultipartUpload Complete request failed for Key: {} " + - "in Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception); + "in Volume/Bucket {}/{}", keyName, volumeName, bucketName, + exception); + break; default: LOG.error("Unrecognized Result for S3MultipartUploadCommitRequest: {}", multipartUploadCompleteRequest); @@ -390,6 +344,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, return omClientResponse; } + private static String failureMessage(String volume, String bucket, + String keyName) { + return "Complete Multipart Upload Failed: volume: " + + volume + " bucket: " + bucket + " key: " + keyName; + } + private void updateCache(OMMetadataManager omMetadataManager, String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo, long transactionLogIndex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index 765a20cfca30..7e2ccd99eec7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -147,11 +147,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - OmVolumeArgs dbVolumeArgs = - omMetadataManager.getVolumeTable().get(dbVolumeKey); - UserVolumeInfo volumeList = null; - if (dbVolumeArgs == null) { + if (omMetadataManager.getVolumeTable().isExist(dbVolumeKey)) { + LOG.debug("volume:{} already exists", omVolumeArgs.getVolume()); + throw new OMException("Volume already exists", + OMException.ResultCodes.VOLUME_ALREADY_EXISTS); + } else { String dbUserKey = omMetadataManager.getUserKey(owner); volumeList = omMetadataManager.getUserTable().get(dbUserKey); volumeList = addVolumeToOwnerList(volumeList, volume, owner, @@ -164,20 +165,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new OMVolumeCreateResponse(omResponse.build(), omVolumeArgs, volumeList); LOG.debug("volume:{} successfully created", omVolumeArgs.getVolume()); - } else { - // Check if this transaction is a replay of ratis logs. - if (isReplay(ozoneManager, dbVolumeArgs, transactionLogIndex)) { - // Replay implies the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", - transactionLogIndex, createVolumeRequest); - return new OMVolumeCreateResponse(createReplayOMResponse(omResponse)); - } else { - LOG.debug("volume:{} already exists", omVolumeArgs.getVolume()); - throw new OMException("Volume already exists", - OMException.ResultCodes.VOLUME_ALREADY_EXISTS); - } } } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java index 4d2f0557ea6e..ce93e269e250 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java @@ -99,16 +99,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmVolumeArgs omVolumeArgs = getVolumeInfo(omMetadataManager, volume); - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, deleteVolumeRequest); - return new OMVolumeDeleteResponse(createReplayOMResponse(omResponse)); - } - owner = omVolumeArgs.getOwnerName(); acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK, owner); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java index 1eea419ef607..6873086750a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java @@ -18,40 +18,34 @@ package org.apache.hadoop.ozone.om.request.volume; -import java.io.IOException; -import java.util.Map; - -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import com.google.common.base.Optional; import com.google.common.base.Preconditions; - +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetOwnerResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos 
- .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; @@ -124,30 +118,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } long maxUserVolumeCount = ozoneManager.getMaxUserVolumeCount(); - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList = null; OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList = null; OmVolumeArgs omVolumeArgs = null; acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock( VOLUME_LOCK, volume); - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - if (omVolumeArgs == null) { - LOG.debug("Changing volume ownership failed for user:{} volume:{}", - newOwner, volume); - throw new OMException("Volume " + volume + " is not found", - OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, setVolumePropertyRequest); - return new OMVolumeSetOwnerResponse(createReplayOMResponse(omResponse)); - } + omVolumeArgs = getVolumeInfo(omMetadataManager, volume); oldOwner = omVolumeArgs.getOwnerName(); // Return OK immediately if newOwner is the same as oldOwner. 
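The hunks above and below swap the per-request volume lookup and replay check for a call to a shared getVolumeInfo helper (defined in OMVolumeRequest, which is not shown in this part of the patch). A minimal sketch of what that helper presumably does, reconstructed from the inline code it replaces; the exact signature and modifiers are assumptions:

```java
// Sketch of OMVolumeRequest#getVolumeInfo, inferred from the removed inline
// lookup + null check; signature and modifiers are assumptions, not copied
// from the actual class.
protected OmVolumeArgs getVolumeInfo(OMMetadataManager omMetadataManager,
    String volume) throws IOException {
  String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
  OmVolumeArgs omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
  if (omVolumeArgs == null) {
    // Same failure mode as the checks removed from the individual requests.
    throw new OMException("Volume " + volume + " is not found",
        OMException.ResultCodes.VOLUME_NOT_FOUND);
  }
  return omVolumeArgs;
}
```

Centralizing the lookup gives OMVolumeDeleteRequest, OMVolumeSetOwnerRequest, OMVolumeSetQuotaRequest and OMVolumeAclRequest a single VOLUME_NOT_FOUND path instead of four copies.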
@@ -194,7 +171,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new CacheValue<>(Optional.of(oldOwnerVolumeList), transactionLogIndex)); omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), + new CacheKey<>(omMetadataManager.getVolumeKey(volume)), new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); omResponse.setSetVolumePropertyResponse( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index 7e0cb72e237d..746a1a60f0a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetQuotaResponse; @@ -127,23 +126,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, acquireVolumeLock = omMetadataManager.getLock().acquireWriteLock( VOLUME_LOCK, volume); - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - if (omVolumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", - transactionLogIndex, setVolumePropertyRequest); - return new OMVolumeSetQuotaResponse(createReplayOMResponse(omResponse)); - } + omVolumeArgs = getVolumeInfo(omMetadataManager, volume); omVolumeArgs.setQuotaInBytes(setVolumePropertyRequest.getQuotaInBytes()); omVolumeArgs.setUpdateID(transactionLogIndex, @@ -153,7 +137,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // update cache. 
omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), + new CacheKey<>(omMetadataManager.getVolumeKey(volume)), new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); omResponse.setSetVolumePropertyResponse( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index f2610e57e7c9..de7f0c0a36d0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -25,12 +25,10 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMReplayException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -46,7 +44,7 @@ /** * Base class for OMVolumeAcl Request. */ -public abstract class OMVolumeAclRequest extends OMClientRequest { +public abstract class OMVolumeAclRequest extends OMVolumeRequest { private CheckedBiFunction, OmVolumeArgs, IOException> omVolumeAclOp; @@ -84,19 +82,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } lockAcquired = omMetadataManager.getLock().acquireWriteLock( VOLUME_LOCK, volume); - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - if (omVolumeArgs == null) { - throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - // Check if this transaction is a replay of ratis logs. - // If this is a replay, then the response has already been returned to - // the client. So take no further action and return a dummy - // OMClientResponse. - if (isReplay(ozoneManager, omVolumeArgs, trxnLogIndex)) { - throw new OMReplayException(); - } + omVolumeArgs = getVolumeInfo(omMetadataManager, volume); // result is false upon add existing acl or remove non-existing acl boolean applyAcl = true; @@ -106,27 +92,23 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, applyAcl = false; } - // We set the updateID even if applyAcl = false to catch the replay - // transactions. - omVolumeArgs.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + // Update only when + if (applyAcl) { + omVolumeArgs.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); - // update cache. - omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), - new CacheValue<>(Optional.of(omVolumeArgs), trxnLogIndex)); + // update cache. 
+ omMetadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getVolumeKey(volume)), + new CacheValue<>(Optional.of(omVolumeArgs), trxnLogIndex)); + } omClientResponse = onSuccess(omResponse, omVolumeArgs, applyAcl); result = Result.SUCCESS; } catch (IOException ex) { - if (ex instanceof OMReplayException) { - result = Result.REPLAY; - omClientResponse = onReplay(omResponse); - } else { - result = Result.FAILURE; - exception = ex; - omMetrics.incNumVolumeUpdateFails(); - omClientResponse = onFailure(omResponse, ex); - } + result = Result.FAILURE; + exception = ex; + omMetrics.incNumVolumeUpdateFails(); + omClientResponse = onFailure(omResponse, ex); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); @@ -183,10 +165,6 @@ abstract OMClientResponse onSuccess( abstract OMClientResponse onFailure(OMResponse.Builder omResponse, IOException ex); - OMClientResponse onReplay(OMResponse.Builder omResonse) { - return new OMVolumeAclOpResponse(createReplayOMResponse(omResonse)); - } - /** * Completion hook for final processing before return without lock. * Usually used for logging without lock. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java index e0f9b3d5e79a..12008e245477 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java @@ -104,12 +104,6 @@ void onComplete(Result result, IOException ex, long trxnLogIndex) { getVolumeName()); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Add acl {} to volume {} failed!", getAcl(), getVolumeName(), ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java index 6e9073192306..461ad481e6a9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java @@ -103,12 +103,6 @@ void onComplete(Result result, IOException ex, long trxnLogIndex) { getVolumeName()); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. 
Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Remove acl {} from volume {} failed!", getAcl(), getVolumeName(), ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java index 8d5bc61ceede..c73e19e75241 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java @@ -100,12 +100,6 @@ void onComplete(Result result, IOException ex, long trxnLogIndex) { getVolumeName()); } break; - case REPLAY: - if (LOG.isDebugEnabled()) { - LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex, - getOmRequest()); - } - break; case FAILURE: LOG.error("Set acls {} to volume {} failed!", getAcls(), getVolumeName(), ex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/CleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/CleanupTableInfo.java index 5a16293ac517..e4564233c782 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/CleanupTableInfo.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/CleanupTableInfo.java @@ -16,7 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.ozone.om.response; -import org.apache.hadoop.classification.InterfaceStability; + +import org.apache.hadoop.hdds.annotation.InterfaceStability; import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java index aa3b3465d570..4af78fe3306c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java @@ -42,7 +42,7 @@ public OMClientResponse(OMResponse omResponse) { } /** - * For error or replay cases, check that the status of omResponse is not OK. + * For error case, check that the status of omResponse is not OK. */ public void checkStatusNotOK() { Preconditions.checkArgument(!omResponse.getStatus().equals( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java index 6948b678385f..cb1f32247b68 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java @@ -48,7 +48,7 @@ public OMBucketCreateResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMBucketCreateResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java index 44167579b80f..c3c7fefc5bba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java @@ -48,7 +48,7 @@ public OMBucketDeleteResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMBucketDeleteResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java index c827e68eb8bf..b9d3cf04f992 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java @@ -45,7 +45,7 @@ public OMBucketSetPropertyResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMBucketSetPropertyResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java index 442dcd1abaeb..0a725231925a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java @@ -46,7 +46,7 @@ public OMBucketAclResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMBucketAclResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java index 499b6f1a2201..2608a1b372e2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java @@ -21,6 +21,8 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result; + import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; @@ -29,7 +31,6 @@ import org.slf4j.LoggerFactory; import javax.annotation.Nonnull; -import javax.annotation.Nullable; import java.io.IOException; import java.util.List; @@ -46,51 +47,50 @@ public class OMDirectoryCreateResponse extends OMClientResponse { private OmKeyInfo dirKeyInfo; private List parentKeyInfos; + private Result result; public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse, - @Nullable OmKeyInfo dirKeyInfo, - @Nullable List parentKeyInfos) { - + @Nonnull OmKeyInfo dirKeyInfo, + @Nonnull List parentKeyInfos, @Nonnull Result result) { super(omResponse); this.dirKeyInfo = dirKeyInfo; this.parentKeyInfos = parentKeyInfos; + this.result = result; } /** - * For when the request is not successful or it is a replay transaction or - * the directory already exists. + * For when the request is not successful or the directory already exists. */ - public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse) { + public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse, + @Nonnull Result result) { super(omResponse); + this.result = result; } @Override protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - if (dirKeyInfo != null) { - if (parentKeyInfos != null) { - for (OmKeyInfo parentKeyInfo : parentKeyInfos) { - String parentKey = omMetadataManager - .getOzoneDirKey(parentKeyInfo.getVolumeName(), - parentKeyInfo.getBucketName(), parentKeyInfo.getKeyName()); - LOG.debug("putWithBatch parent : key {} info : {}", parentKey, - parentKeyInfo); - omMetadataManager.getKeyTable() - .putWithBatch(batchOperation, parentKey, parentKeyInfo); - } + if (Result.SUCCESS == result) { + // Add all parent keys to batch. + for (OmKeyInfo parentKeyInfo : parentKeyInfos) { + String parentKey = omMetadataManager + .getOzoneDirKey(parentKeyInfo.getVolumeName(), + parentKeyInfo.getBucketName(), parentKeyInfo.getKeyName()); + LOG.debug("putWithBatch parent : key {} info : {}", parentKey, + parentKeyInfo); + omMetadataManager.getKeyTable() + .putWithBatch(batchOperation, parentKey, parentKeyInfo); } String dirKey = omMetadataManager.getOzoneKey(dirKeyInfo.getVolumeName(), dirKeyInfo.getBucketName(), dirKeyInfo.getKeyName()); omMetadataManager.getKeyTable().putWithBatch(batchOperation, dirKey, dirKeyInfo); - - } else { + } else if (Result.DIRECTORY_ALREADY_EXISTS == result) { // When directory already exists, we don't add it to cache. And it is // not an error, in this case dirKeyInfo will be null. 
- LOG.debug("Response Status is OK, dirKeyInfo is null in " + - "OMDirectoryCreateResponse"); + LOG.debug("Directory already exists. addToDBBatch is a no-op"); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java index de069cc0990f..e54379b1616b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java @@ -34,12 +34,12 @@ public class OMFileCreateResponse extends OMKeyCreateResponse { public OMFileCreateResponse(@Nonnull OMResponse omResponse, @Nonnull OmKeyInfo omKeyInfo, - List parentKeyInfos, long openKeySessionID) { + @Nonnull List parentKeyInfos, long openKeySessionID) { super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID); } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMFileCreateResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java index 7d1bd44915ae..5ea44a75bc6d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java @@ -48,7 +48,7 @@ public OMAllocateBlockResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMAllocateBlockResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java index 9f97bbb1a8f5..c0216eb3a054 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java @@ -50,21 +50,7 @@ public OMKeyCommitResponse(@Nonnull OMResponse omResponse, } /** - * When the KeyCommit request is a replay but the openKey should be deleted - * from the OpenKey table. - * Note that this response will result in openKey deletion only. Key will - * not be added to Key table. - * @param openKeyName openKey to be deleted from OpenKey table - */ - public OMKeyCommitResponse(@Nonnull OMResponse omResponse, - String openKeyName) { - super(omResponse); - this.omKeyInfo = null; - this.openKeyName = openKeyName; - } - - /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMKeyCommitResponse(@Nonnull OMResponse omResponse) { @@ -80,12 +66,8 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation, openKeyName); - // Add entry to Key table if omKeyInfo is available i.e. it is not a - // replayed transaction. - if (omKeyInfo != null) { - omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName, - omKeyInfo); - } + omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName, + omKeyInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java index db9815ac1ea7..4d0899df250c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java @@ -56,7 +56,7 @@ public OMKeyCreateResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMKeyCreateResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index e0228f6c202f..f0ba99116c1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; @@ -54,7 +53,7 @@ public OMKeyDeleteResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMKeyDeleteResponse(@Nonnull OMResponse omResponse) { @@ -68,31 +67,28 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, // For OmResponse with failure, this should do nothing. This method is // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - ozoneKey); + String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); + omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, ozoneKey); - // If Key is not empty add this to delete table. - if (!isKeyEmpty(omKeyInfo)) { - // If a deleted key is put in the table where a key with the same - // name already exists, then the old deleted key information would be - // lost. 
To avoid this, first check if a key with same name exists. - // deletedTable in OM Metadata stores . - // The RepeatedOmKeyInfo is the structure that allows us to store a - // list of OmKeyInfo that can be tied to same key name. For a keyName - // if RepeatedOMKeyInfo structure is null, we create a new instance, - // if it is not null, then we simply add to the list and store this - // instance in deletedTable. - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(ozoneKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(), - isRatisEnabled); - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, + // If Key is not empty add this to delete table. + if (!isKeyEmpty(omKeyInfo)) { + // If a deleted key is put in the table where a key with the same + // name already exists, then the old deleted key information would be + // lost. To avoid this, first check if a key with same name exists. + // deletedTable in OM Metadata stores . + // The RepeatedOmKeyInfo is the structure that allows us to store a + // list of OmKeyInfo that can be tied to same key name. For a keyName + // if RepeatedOMKeyInfo structure is null, we create a new instance, + // if it is not null, then we simply add to the list and store this + // instance in deletedTable. + RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(ozoneKey); + repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(), + isRatisEnabled); + omMetadataManager.getDeletedTable().putWithBatch(batchOperation, ozoneKey, repeatedOmKeyInfo); - } } } @@ -109,7 +105,7 @@ private boolean isKeyEmpty(@Nullable OmKeyInfo keyInfo) { } for (OmKeyLocationInfoGroup keyLocationList : keyInfo .getKeyLocationVersions()) { - if (keyLocationList.getLocationList().size() != 0) { + if (keyLocationList.getLocationListCount() != 0) { return false; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index e26433f7a21a..01b7457085cc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -39,20 +39,11 @@ public class OMKeyPurgeResponse extends OMClientResponse { private List purgeKeyList; public OMKeyPurgeResponse(@Nonnull OMResponse omResponse, - List keyList) { + @Nonnull List keyList) { super(omResponse); this.purgeKeyList = keyList; } - /** - * For when the request is not successful or it is a replay transaction. - * For a successful request, the other constructor should be used. 
- */ - public OMKeyPurgeResponse(@Nonnull OMResponse omResponse) { - super(omResponse); - checkStatusNotOK(); - } - @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java index 3e640722ddc2..7470b3788443 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.response.key; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; @@ -40,38 +39,18 @@ public class OMKeyRenameResponse extends OMClientResponse { private String fromKeyName; private String toKeyName; - private OmKeyInfo newKeyInfo; + private OmKeyInfo renameKeyInfo; public OMKeyRenameResponse(@Nonnull OMResponse omResponse, String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo) { super(omResponse); this.fromKeyName = fromKeyName; this.toKeyName = toKeyName; - this.newKeyInfo = renameKeyInfo; + this.renameKeyInfo = renameKeyInfo; } /** - * When Rename request is replayed and toKey already exists, but fromKey - * has not been deleted. - * For example, lets say we have the following sequence of transactions - * Trxn 1 : Create Key1 - * Trnx 2 : Rename Key1 to Key2 -> Deletes Key1 and Creates Key2 - * Now if these transactions are replayed: - * Replay Trxn 1 : Creates Key1 again as Key1 does not exist in DB - * Replay Trxn 2 : Key2 is not created as it exists in DB and the request - * would be deemed a replay. But Key1 is still in the DB and needs to be - * deleted. - */ - public OMKeyRenameResponse(@Nonnull OMResponse omResponse, - String fromKeyName, OmKeyInfo fromKeyInfo) { - super(omResponse); - this.fromKeyName = fromKeyName; - this.newKeyInfo = fromKeyInfo; - this.toKeyName = null; - } - - /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMKeyRenameResponse(@Nonnull OMResponse omResponse) { @@ -82,31 +61,13 @@ public OMKeyRenameResponse(@Nonnull OMResponse omResponse) { @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - String volumeName = newKeyInfo.getVolumeName(); - String bucketName = newKeyInfo.getBucketName(); - // If toKeyName is null, then we need to only delete the fromKeyName from - // KeyTable. This is the case of replay where toKey exists but fromKey - // has not been deleted. 
- if (deleteFromKeyOnly()) { - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName)); - } else if (createToKeyAndDeleteFromKey()) { - // If both from and toKeyName are equal do nothing - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName)); - omMetadataManager.getKeyTable().putWithBatch(batchOperation, - omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName), - newKeyInfo); - } + String volumeName = renameKeyInfo.getVolumeName(); + String bucketName = renameKeyInfo.getBucketName(); + omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, + omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName)); + omMetadataManager.getKeyTable().putWithBatch(batchOperation, + omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName), + renameKeyInfo); } - @VisibleForTesting - public boolean deleteFromKeyOnly() { - return toKeyName == null && fromKeyName != null; - } - - @VisibleForTesting - public boolean createToKeyAndDeleteFromKey() { - return toKeyName != null && !toKeyName.equals(fromKeyName); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java index 597841ca3d75..9d2cd539fbd5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java @@ -18,17 +18,13 @@ package org.apache.hadoop.ozone.om.response.key; -import com.google.common.base.Optional; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import javax.annotation.Nonnull; @@ -36,7 +32,8 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; /** * Response for DeleteKey request. 
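The hunk below adds a checkAndUpdateDB override so that key deletions are flushed for both OK and PARTIAL_DELETE responses. For context, the base-class behaviour being overridden is assumed to look roughly like the sketch below (OMClientResponse#checkAndUpdateDB is not shown in this excerpt): only OK responses reach addToDBBatch, which is why responses with additional success-like statuses need their own override (S3MultipartUploadCommitPartResponse does the same further down for NO_SUCH_MULTIPART_UPLOAD_ERROR).

```java
// Assumed default in OMClientResponse (not shown in this excerpt): flush the
// batch only for OK responses; subclasses override this to admit other
// success-like statuses such as PARTIAL_DELETE.
public void checkAndUpdateDB(OMMetadataManager omMetadataManager,
    BatchOperation batchOperation) throws IOException {
  if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
    addToDBBatch(omMetadataManager, batchOperation);
  }
}
```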
@@ -48,10 +45,10 @@ public class OMKeysDeleteResponse extends OMClientResponse { private long trxnLogIndex; public OMKeysDeleteResponse(@Nonnull OMResponse omResponse, - @Nonnull List omKeyInfoList, + @Nonnull List keyDeleteList, long trxnLogIndex, boolean isRatisEnabled) { super(omResponse); - this.omKeyInfoList = omKeyInfoList; + this.omKeyInfoList = keyDeleteList; this.isRatisEnabled = isRatisEnabled; this.trxnLogIndex = trxnLogIndex; } @@ -65,69 +62,48 @@ public OMKeysDeleteResponse(@Nonnull OMResponse omResponse) { checkStatusNotOK(); } + public void checkAndUpdateDB(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + if (getOMResponse().getStatus() == OK || + getOMResponse().getStatus() == PARTIAL_DELETE) { + addToDBBatch(omMetadataManager, batchOperation); + } + } + @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + String volumeName = ""; + String bucketName = ""; + String keyName = ""; for (OmKeyInfo omKeyInfo : omKeyInfoList) { - // Set the UpdateID to current transactionLogIndex - omKeyInfo.setUpdateID(trxnLogIndex, isRatisEnabled); + volumeName = omKeyInfo.getVolumeName(); + bucketName = omKeyInfo.getBucketName(); + keyName = omKeyInfo.getKeyName(); - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - boolean acquiredLock = false; - String volumeName = ""; - String bucketName = ""; + String deleteKey = omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); - try { - volumeName = omKeyInfo.getVolumeName(); - bucketName = omKeyInfo.getBucketName(); - String keyName = omKeyInfo.getKeyName(); - acquiredLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - // Update table cache. - omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName)), - new CacheValue<>(Optional.absent(), trxnLogIndex)); + omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, + deleteKey); - String ozoneKey = omMetadataManager.getOzoneKey( - omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), - omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - ozoneKey); - // If a deleted key is put in the table where a key with the same - // name already exists, then the old deleted key information would - // be lost. To avoid this, first check if a key with same name - // exists. deletedTable in OM Metadata stores . The RepeatedOmKeyInfo is the structure that - // allows us to store a list of OmKeyInfo that can be tied to same - // key name. For a keyName if RepeatedOMKeyInfo structure is null, - // we create a new instance, if it is not null, then we simply add - // to the list and store this instance in deletedTable. 
- RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(ozoneKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(), - isRatisEnabled); - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - ozoneKey, repeatedOmKeyInfo); - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock( - BUCKET_LOCK, volumeName, bucketName); - acquiredLock = false; - } - } finally { - if (acquiredLock) { - omMetadataManager.getLock() - .releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - } + // If a deleted key is put in the table where a key with the same + // name already exists, then the old deleted key information would + // be lost. To avoid this, first check if a key with same name + // exists. deletedTable in OM Metadata stores . The RepeatedOmKeyInfo is the structure that + // allows us to store a list of OmKeyInfo that can be tied to same + // key name. For a keyName if RepeatedOMKeyInfo structure is null, + // we create a new instance, if it is not null, then we simply add + // to the list and store this instance in deletedTable. + RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(deleteKey); + repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + omKeyInfo, repeatedOmKeyInfo, trxnLogIndex, + isRatisEnabled); + omMetadataManager.getDeletedTable().putWithBatch(batchOperation, + deleteKey, repeatedOmKeyInfo); } } - } \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java new file mode 100644 index 000000000000..a9ff7ada1bd7 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for RenameKeys request. + */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE}) +public class OMKeysRenameResponse extends OMClientResponse { + + private OmRenameKeys omRenameKeys; + + public OMKeysRenameResponse(@Nonnull OMResponse omResponse, + OmRenameKeys omRenameKeys) { + super(omResponse); + this.omRenameKeys = omRenameKeys; + } + + + /** + * For when the request is not successful or it is a replay transaction. + * For a successful request, the other constructor should be used. + */ + public OMKeysRenameResponse(@Nonnull OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String volumeName = omRenameKeys.getVolume(); + String bucketName = omRenameKeys.getBucket(); + + for (Map.Entry< String, OmKeyInfo> entry : + omRenameKeys.getFromKeyAndToKeyInfo().entrySet()) { + String fromKeyName = entry.getKey(); + OmKeyInfo newKeyInfo = entry.getValue(); + String toKeyName = newKeyInfo.getKeyName(); + + omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, + omMetadataManager + .getOzoneKey(volumeName, bucketName, fromKeyName)); + omMetadataManager.getKeyTable().putWithBatch(batchOperation, + omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName), + newKeyInfo); + } + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java index e3177f8b26de..2bbeae0addde 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java @@ -47,7 +47,7 @@ public OMKeyAclResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMKeyAclResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java index 225bad33e396..288a38fea0f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java @@ -45,7 +45,7 @@ public OMPrefixAclResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMPrefixAclResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java index 0cc8dff52434..ec1b3ae0ff61 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java @@ -52,7 +52,7 @@ public S3InitiateMultipartUploadResponse( } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public S3InitiateMultipartUploadResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java index b47b22b92d9c..47cde0870103 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java @@ -61,7 +61,7 @@ public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index 298b73310e3f..f68af4a23dfc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -41,6 +41,7 @@ .Status.OK; import javax.annotation.Nonnull; +import javax.annotation.Nullable; /** * Response for S3MultipartUploadCommitPart request. 
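The many javadoc edits in this patch ("For when the request is not successful.") all refer to the same two-constructor convention, which the checkStatusNotOK() change earlier makes explicit: the payload constructor is used for successful requests, while the status-only constructor refuses OK responses so a success can never be flushed without its payload. A hypothetical response class illustrating that convention (ExampleResponse and its field are illustrative names, not part of the patch):

```java
// Illustrative example of the success/failure constructor convention used by
// the OM response classes in this patch. ExampleResponse is a made-up class.
@CleanupTableInfo(cleanupTables = {KEY_TABLE})
public class ExampleResponse extends OMClientResponse {

  private OmKeyInfo omKeyInfo;

  /** For a successful request: carries the data to be written in the DB batch. */
  public ExampleResponse(@Nonnull OMResponse omResponse,
      @Nonnull OmKeyInfo omKeyInfo) {
    super(omResponse);
    this.omKeyInfo = omKeyInfo;
  }

  /** For when the request is not successful: no payload, status must not be OK. */
  public ExampleResponse(@Nonnull OMResponse omResponse) {
    super(omResponse);
    checkStatusNotOK();
  }

  @Override
  public void addToDBBatch(OMMetadataManager omMetadataManager,
      BatchOperation batchOperation) throws IOException {
    // Reached only for successful responses (see the checkAndUpdateDB note
    // above), so omKeyInfo is expected to be non-null here.
    omMetadataManager.getKeyTable().putWithBatch(batchOperation,
        omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
            omKeyInfo.getBucketName(), omKeyInfo.getKeyName()),
        omKeyInfo);
  }
}
```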
@@ -69,45 +70,19 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse { */ public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse, String multipartKey, String openKey, - @Nonnull OmMultipartKeyInfo omMultipartKeyInfo, - @Nonnull OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, + @Nullable OmMultipartKeyInfo omMultipartKeyInfo, + @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, + @Nullable OmKeyInfo openPartKeyInfoToBeDeleted, boolean isRatisEnabled) { super(omResponse); this.multipartKey = multipartKey; this.openKey = openKey; this.omMultipartKeyInfo = omMultipartKeyInfo; this.oldPartKeyInfo = oldPartKeyInfo; - this.isRatisEnabled = isRatisEnabled; - } - - /** - * For the case when Multipart Upload does not exist (could have been - * aborted). - * 1. Put the partKeyInfo from openKeyTable into DeletedTable - * 2. Deleted openKey from OpenKeyTable - * @param omResponse - * @param openKey - * @param openPartKeyInfoToBeDeleted - */ - public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse, - String openKey, @Nonnull OmKeyInfo openPartKeyInfoToBeDeleted, - boolean isRatisEnabled) { - super(omResponse); - checkStatusNotOK(); - this.openKey = openKey; this.openPartKeyInfoToBeDeleted = openPartKeyInfoToBeDeleted; this.isRatisEnabled = isRatisEnabled; } - /** - * For when the request is not successful or it is a replay transaction. - * For a successful request, the other constructor should be used. - */ - public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse) { - super(omResponse); - checkStatusNotOK(); - } - @Override public void checkAndUpdateDB(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { @@ -115,12 +90,14 @@ public void checkAndUpdateDB(OMMetadataManager omMetadataManager, if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) { // Means by the time we try to commit part, some one has aborted this // multipart upload. So, delete this part information. + RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable().get(openKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - openPartKeyInfoToBeDeleted, repeatedOmKeyInfo, - openPartKeyInfoToBeDeleted.getUpdateID(), isRatisEnabled); + repeatedOmKeyInfo = + OmUtils.prepareKeyForDelete(openPartKeyInfoToBeDeleted, + repeatedOmKeyInfo, openPartKeyInfoToBeDeleted.getUpdateID(), + isRatisEnabled); omMetadataManager.getDeletedTable().putWithBatch(batchOperation, openKey, repeatedOmKeyInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index 093d1808db82..20e398eb4f09 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -59,20 +59,7 @@ public S3MultipartUploadCompleteResponse( } /** - * When the S3MultipartUploadCompleteRequest is a replay but the - * openKey should be deleted from the OpenKey table. - * Note that this response will result in openKey deletion and - * multipartInfo deletion only. Key will not be added to Key table. 
- */ - public S3MultipartUploadCompleteResponse( - @Nonnull OMResponse omResponse, - @Nonnull String multipartKey) { - super(omResponse); - this.multipartKey = multipartKey; - } - - /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public S3MultipartUploadCompleteResponse(@Nonnull OMResponse omResponse) { @@ -89,26 +76,23 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation, multipartKey); - if (omKeyInfo != null) { - String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().putWithBatch(batchOperation, - ozoneKey, omKeyInfo); + String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); + omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey, + omKeyInfo); - if (!partsUnusedList.isEmpty()) { - // Add unused parts to deleted key table. - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable() - .get(ozoneKey); - if (repeatedOmKeyInfo == null) { - repeatedOmKeyInfo = new RepeatedOmKeyInfo(partsUnusedList); - } else { - repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo); - } - - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - ozoneKey, repeatedOmKeyInfo); + if (!partsUnusedList.isEmpty()) { + // Add unused parts to deleted key table. + RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable() + .get(ozoneKey); + if (repeatedOmKeyInfo == null) { + repeatedOmKeyInfo = new RepeatedOmKeyInfo(partsUnusedList); + } else { + repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo); } + + omMetadataManager.getDeletedTable().putWithBatch(batchOperation, + ozoneKey, repeatedOmKeyInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java index 647123dff9b4..f9f0688c3a05 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java @@ -46,7 +46,7 @@ public OMVolumeAclOpResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMVolumeAclOpResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java index cd70dc2bcf1d..1b8e26e246b3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java @@ -53,7 +53,7 @@ public OMVolumeCreateResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. 
* For a successful request, the other constructor should be used. */ public OMVolumeCreateResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java index 80d9e8c3693e..db43fa641ed8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java @@ -52,7 +52,7 @@ public OMVolumeDeleteResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. */ public OMVolumeDeleteResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java index 3ed8bb0220fc..a1efe703f957 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java @@ -58,8 +58,8 @@ public OMVolumeSetOwnerResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. - * Or when newOwner is the same as oldOwner. + * For when the request is not successful or when newOwner is the same as + * oldOwner. * For other successful requests, the other constructor should be used. */ public OMVolumeSetOwnerResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java index b50a923620e3..c6210254b9ec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java @@ -46,7 +46,7 @@ public OMVolumeSetQuotaResponse(@Nonnull OMResponse omResponse, } /** - * For when the request is not successful or it is a replay transaction. + * For when the request is not successful. * For a successful request, the other constructor should be used. 
*/ public OMVolumeSetQuotaResponse(@Nonnull OMResponse omResponse) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java index 1c78251abb92..a11c60b9435d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java @@ -113,8 +113,10 @@ public OzoneManagerSnapshotProvider(MutableConfigurationSource conf, public DBCheckpoint getOzoneManagerDBSnapshot(String leaderOMNodeID) throws IOException { String snapshotTime = Long.toString(System.currentTimeMillis()); - String snapshotFileName = Paths.get(omSnapshotDir.getAbsolutePath(), - snapshotTime, OM_DB_NAME).toFile().getAbsolutePath(); + String snapshotFileName = OM_DB_NAME + "-" + leaderOMNodeID + + "-" + snapshotTime; + String snapshotFilePath = Paths.get(omSnapshotDir.getAbsolutePath(), + snapshotFileName).toFile().getAbsolutePath(); File targetFile = new File(snapshotFileName + ".tar.gz"); String omCheckpointUrl = peerNodesMap.get(leaderOMNodeID) @@ -141,11 +143,11 @@ public DBCheckpoint getOzoneManagerDBSnapshot(String leaderOMNodeID) }); // Untar the checkpoint file. - Path untarredDbDir = Paths.get(snapshotFileName); + Path untarredDbDir = Paths.get(snapshotFilePath); FileUtil.unTar(targetFile, untarredDbDir.toFile()); FileUtils.deleteQuietly(targetFile); - LOG.info("Sucessfully downloaded latest checkpoint from leader OM: {}", + LOG.info("Successfully downloaded latest checkpoint from leader OM: {}", leaderOMNodeID); RocksDBCheckpoint omCheckpoint = new RocksDBCheckpoint(untarredDbDir); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 3183aa15ff75..cd3287eddb53 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -417,7 +417,7 @@ private ListTrashResponse listTrash(ListTrashRequest request) request.getMaxKeys()); for (RepeatedOmKeyInfo key: deletedKeys) { - resp.addDeletedKeys(key.getProto()); + resp.addDeletedKeys(key.getProto(false)); } return resp.build(); @@ -536,6 +536,7 @@ private GetFileStatusResponse getOzoneFileStatus( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setRefreshPipeline(true) + .setSortDatanodesInPipeline(keyArgs.getSortDatanodes()) .build(); GetFileStatusResponse.Builder rb = GetFileStatusResponse.newBuilder(); @@ -568,6 +569,7 @@ private ListStatusResponse listStatus( .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setRefreshPipeline(true) + .setSortDatanodesInPipeline(keyArgs.getSortDatanodes()) .build(); List statuses = impl.listStatus(omKeyArgs, request.getRecursive(), diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html index 5fb6e44f83ee..8a5297b40f75 100644 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html +++ 
b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html @@ -57,7 +57,7 @@ - + diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html index 082189965182..f6572110d19e 100644 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html +++ b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html @@ -15,4 +15,6 @@ limitations under the License. --> + + \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html new file mode 100644 index 000000000000..4f52e413b2fd --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html @@ -0,0 +1,26 @@ + + +

[The 26 added lines of om-overview.html lost their markup when this patch was rendered; what survives shows the file adds the standard ASF license header, a "Status" heading, and a table row labelled "Rpc port" bound to {{$ctrl.overview.jmx.RpcPort}}.]
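Because the original tags are not recoverable from this copy, the sketch below only illustrates what the added AngularJS template plausibly contains, inferred from the surviving text ("Status", "Rpc port", the {{$ctrl.overview.jmx.RpcPort}} binding) and from the omOverview component registered in ozoneManager.js just below; the element structure and any CSS classes are assumptions, not the committed file.

```html
<!-- Hypothetical reconstruction of om-overview.html (ASF license header omitted). -->
<!-- $ctrl.overview is supplied by the parent "overview" component that omOverview
     requires in ozoneManager.js. -->
<h2>Status</h2>
<table class="table table-bordered"> <!-- table classes are an assumption -->
  <tbody>
    <tr>
      <td>Rpc port</td>
      <td>{{$ctrl.overview.jmx.RpcPort}}</td>
    </tr>
  </tbody>
</table>
```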
diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js index fda6d8fc0b5c..6c59a5be5856 100644 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js +++ b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js @@ -108,5 +108,10 @@ }) } }); - + angular.module('ozoneManager').component('omOverview', { + templateUrl: 'om-overview.html', + require: { + overview: "^overview" + }, + }); })(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 4e62eb8fa193..d06e43d8a3dd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om; +import java.io.File; import java.io.IOException; import java.time.Instant; import java.util.ArrayList; @@ -30,6 +31,7 @@ import java.util.UUID; import com.google.common.base.Optional; +import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -65,6 +67,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -80,12 +83,14 @@ public class TestKeyManagerUnit { private KeyManagerImpl keyManager; private Instant startDate; + private File testDir; @Before public void setup() throws IOException { configuration = new OzoneConfiguration(); + testDir = GenericTestUtils.getRandomizedTestDir(); configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, - GenericTestUtils.getRandomizedTestDir().toString()); + testDir.toString()); metadataManager = new OmMetadataManagerImpl(configuration); keyManager = new KeyManagerImpl( Mockito.mock(ScmBlockLocationProtocol.class), @@ -98,6 +103,12 @@ public void setup() throws IOException { startDate = Instant.now(); } + @After + public void cleanup() throws Exception { + metadataManager.stop(); + FileUtils.deleteDirectory(testDir); + } + @Test public void listMultipartUploadPartsWithZeroUpload() throws IOException { //GIVEN @@ -113,8 +124,6 @@ public void listMultipartUploadPartsWithZeroUpload() throws IOException { Assert.assertEquals(0, omMultipartUploadListParts.getPartInfoList().size()); - - this.startDate = Instant.now(); } @Test @@ -144,9 +153,11 @@ public void listMultipartUploads() throws IOException { Assert.assertEquals("dir/key2", uploads.get(1).getKeyName()); Assert.assertNotNull(uploads.get(1)); - Assert.assertNotNull(uploads.get(1).getCreationTime()); - Assert.assertTrue("Creation date is too old", - uploads.get(1).getCreationTime().compareTo(startDate) > 0); + Instant creationTime = uploads.get(1).getCreationTime(); + Assert.assertNotNull(creationTime); + Assert.assertFalse("Creation date is too old: " + + creationTime + " < " + startDate, + creationTime.isBefore(startDate)); } @Test @@ -406,26 +417,6 @@ public void testLookupFileWithDnFailure() throws IOException { .setBucketName("bucketOne") .setKeyName("keyOne"); - keyArgs.setRefreshPipeline(false); - final OmKeyInfo oldKeyInfo = manager - .lookupFile(keyArgs.build(), "test"); - - final OmKeyLocationInfo 
oldBlockLocation = oldKeyInfo - .getLatestVersionLocations().getBlocksLatestVersionOnly().get(0); - - Assert.assertEquals(1L, oldBlockLocation.getContainerID()); - Assert.assertEquals(1L, oldBlockLocation - .getBlockID().getLocalID()); - Assert.assertEquals(pipelineOne.getId(), - oldBlockLocation.getPipeline().getId()); - Assert.assertTrue(oldBlockLocation.getPipeline() - .getNodes().contains(dnOne)); - Assert.assertTrue(oldBlockLocation.getPipeline() - .getNodes().contains(dnTwo)); - Assert.assertTrue(oldBlockLocation.getPipeline() - .getNodes().contains(dnThree)); - - keyArgs.setRefreshPipeline(true); final OmKeyInfo newKeyInfo = manager .lookupFile(keyArgs.build(), "test"); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 054c97f396c7..7c2d258e9a00 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -33,9 +33,15 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.TreeSet; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; @@ -76,7 +82,7 @@ public void testTransactionTable() throws Exception { OMTransactionInfo omTransactionInfo = omMetadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY); - Assert.assertEquals(3, omTransactionInfo.getCurrentTerm()); + Assert.assertEquals(3, omTransactionInfo.getTerm()); Assert.assertEquals(250, omTransactionInfo.getTransactionIndex()); @@ -521,6 +527,77 @@ public void testListKeysWithFewDeleteEntriesInCache() throws Exception { } + @Test + public void testGetExpiredOpenKeys() throws Exception { + final String bucketName = "bucket"; + final String volumeName = "volume"; + final int numExpiredOpenKeys = 4; + final int numUnexpiredOpenKeys = 1; + final long clientID = 1000L; + // To create expired keys, they will be assigned a creation time twice as + // old as the minimum expiration time. + final long minExpiredTimeSeconds = ozoneConfiguration.getInt( + OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, + OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT); + final long expiredAgeMillis = + Instant.now().minus(minExpiredTimeSeconds * 2, + ChronoUnit.SECONDS).toEpochMilli(); + + // Add expired keys to open key table. + // The method under test does not check for expired open keys in the + // cache, since they will be picked up once the cache is flushed. 
+ Set expiredKeys = new HashSet<>(); + for (int i = 0; i < numExpiredOpenKeys; i++) { + OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, 0L, expiredAgeMillis); + + TestOMRequestUtils.addKeyToTable(true, false, + keyInfo, clientID, 0L, omMetadataManager); + + String groupID = omMetadataManager.getOpenKey(volumeName, bucketName, + keyInfo.getKeyName(), clientID); + expiredKeys.add(groupID); + } + + // Add unexpired keys to open key table. + for (int i = 0; i < numUnexpiredOpenKeys; i++) { + OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + bucketName, "unexpired" + i, HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); + + TestOMRequestUtils.addKeyToTable(true, false, + keyInfo, clientID, 0L, omMetadataManager); + } + + // Test retrieving fewer expired keys than actually exist. + List someExpiredKeys = + omMetadataManager.getExpiredOpenKeys(numExpiredOpenKeys - 1); + + Assert.assertEquals(numExpiredOpenKeys - 1, someExpiredKeys.size()); + for (String key: someExpiredKeys) { + Assert.assertTrue(expiredKeys.contains(key)); + } + + // Test attempting to retrieving more expired keys than actually exist. + List allExpiredKeys = + omMetadataManager.getExpiredOpenKeys(numExpiredOpenKeys + 1); + + Assert.assertEquals(numExpiredOpenKeys, allExpiredKeys.size()); + for (String key: allExpiredKeys) { + Assert.assertTrue(expiredKeys.contains(key)); + } + + // Test retrieving exact amount of expired keys that exist. + allExpiredKeys = + omMetadataManager.getExpiredOpenKeys(numExpiredOpenKeys); + + Assert.assertEquals(numExpiredOpenKeys, allExpiredKeys.size()); + for (String key: allExpiredKeys) { + Assert.assertTrue(expiredKeys.contains(key)); + } + } + private void addKeysToOM(String volumeName, String bucketName, String keyName, int i) throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java new file mode 100644 index 000000000000..39c35f675e6f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.codec; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.util.Time; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +/** + * This class tests OmKeyInfoCodec. + */ +public class TestOmKeyInfoCodec { + private final String volume = "hadoop"; + private final String bucket = "ozone"; + private final String keyName = "user/root/terasort/10G-input-6/part-m-00037"; + + + private OmKeyInfo getKeyInfo(int chunkNum) { + List omKeyLocationInfoList = new ArrayList<>(); + Pipeline pipeline = TestUtils.getRandomPipeline(); + for (int i = 0; i < chunkNum; i++) { + BlockID blockID = new BlockID(i, i); + OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() + .setBlockID(blockID) + .setPipeline(pipeline) + .build(); + omKeyLocationInfoList.add(keyLocationInfo); + } + OmKeyLocationInfoGroup omKeyLocationInfoGroup = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList); + return new OmKeyInfo.Builder() + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setReplicationType(HddsProtos.ReplicationType.RATIS) + .setReplicationFactor(HddsProtos.ReplicationFactor.THREE) + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(keyName) + .setObjectID(Time.now()) + .setUpdateID(Time.now()) + .setDataSize(100) + .setOmKeyLocationInfos( + Collections.singletonList(omKeyLocationInfoGroup)) + .build(); + } + + @Test + public void test() { + testOmKeyInfoCodecWithoutPipeline(1); + testOmKeyInfoCodecWithoutPipeline(2); + testOmKeyInfoCodecCompatibility(1); + testOmKeyInfoCodecCompatibility(2); + } + + public void testOmKeyInfoCodecWithoutPipeline(int chunkNum) { + OmKeyInfoCodec codec = new OmKeyInfoCodec(true); + OmKeyInfo originKey = getKeyInfo(chunkNum); + try { + byte[] rawData = codec.toPersistedFormat(originKey); + OmKeyInfo key = codec.fromPersistedFormat(rawData); + System.out.println("Chunk number = " + chunkNum + + ", Serialized key size without pipeline = " + rawData.length); + assertNull(key.getLatestVersionLocations().getLocationList().get(0) + .getPipeline()); + } catch (IOException e) { + fail("Should success"); + } + } + + public void testOmKeyInfoCodecCompatibility(int chunkNum) { + OmKeyInfoCodec codecWithoutPipeline = new OmKeyInfoCodec(true); + OmKeyInfoCodec codecWithPipeline = new OmKeyInfoCodec(false); + OmKeyInfo originKey = getKeyInfo(chunkNum); + try { + byte[] rawData = codecWithPipeline.toPersistedFormat(originKey); + OmKeyInfo key = codecWithoutPipeline.fromPersistedFormat(rawData); + System.out.println("Chunk number = " + chunkNum + + ", Serialized key size with pipeline = " + rawData.length); + 
assertNotNull(key.getLatestVersionLocations().getLocationList().get(0) + .getPipeline()); + } catch (IOException e) { + fail("Should success"); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java new file mode 100644 index 000000000000..0eb87b8cfa2c --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.codec; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.util.Time; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +/** + * This class tests RepeatedOmKeyInfoCodec. + */ +public class TestRepeatedOmKeyInfoCodec { + private final String volume = "hadoop"; + private final String bucket = "ozone"; + private final String keyName = "user/root/terasort/10G-input-6/part-m-00037"; + + + private OmKeyInfo getKeyInfo(int chunkNum) { + List omKeyLocationInfoList = new ArrayList<>(); + Pipeline pipeline = TestUtils.getRandomPipeline(); + for (int i = 0; i < chunkNum; i++) { + BlockID blockID = new BlockID(i, i); + OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() + .setBlockID(blockID) + .setPipeline(pipeline) + .build(); + omKeyLocationInfoList.add(keyLocationInfo); + } + OmKeyLocationInfoGroup omKeyLocationInfoGroup = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList); + return new OmKeyInfo.Builder() + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setReplicationType(HddsProtos.ReplicationType.RATIS) + .setReplicationFactor(HddsProtos.ReplicationFactor.THREE) + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(keyName) + .setObjectID(Time.now()) + .setUpdateID(Time.now()) + .setDataSize(100) + .setOmKeyLocationInfos( + Collections.singletonList(omKeyLocationInfoGroup)) + .build(); + } + + @Test + public void test() { + testWithoutPipeline(1); + testWithoutPipeline(2); + testCompatibility(1); + testCompatibility(2); + } + + public void testWithoutPipeline(int chunkNum) { + RepeatedOmKeyInfoCodec codec = new RepeatedOmKeyInfoCodec(true); + OmKeyInfo originKey = getKeyInfo(chunkNum); + RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(originKey); + try { + byte[] rawData = codec.toPersistedFormat(repeatedOmKeyInfo); + RepeatedOmKeyInfo key = codec.fromPersistedFormat(rawData); + System.out.println("Chunk number = " + chunkNum + + ", Serialized key size without pipeline = " + rawData.length); + assertNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations() + .getLocationList().get(0).getPipeline()); + } catch (IOException e) { + fail("Should success"); + } + } + + public void testCompatibility(int chunkNum) { + RepeatedOmKeyInfoCodec codecWithoutPipeline = + new RepeatedOmKeyInfoCodec(true); + RepeatedOmKeyInfoCodec codecWithPipeline = + new RepeatedOmKeyInfoCodec(false); + OmKeyInfo originKey = getKeyInfo(chunkNum); + RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(originKey); + try { + byte[] rawData = 
codecWithPipeline.toPersistedFormat(repeatedOmKeyInfo); + RepeatedOmKeyInfo key = codecWithoutPipeline.fromPersistedFormat(rawData); + System.out.println("Chunk number = " + chunkNum + + ", Serialized key size with pipeline = " + rawData.length); + assertNotNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations() + .getLocationList().get(0).getPipeline()); + } catch (IOException e) { + fail("Should success"); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index 7b86006b9379..372679b2b3eb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -139,7 +139,7 @@ public void testDoubleBufferWithDummyResponse() throws Exception { Assert.assertEquals(lastAppliedIndex, omTransactionInfo.getTransactionIndex()); - Assert.assertEquals(term, omTransactionInfo.getCurrentTerm()); + Assert.assertEquals(term, omTransactionInfo.getTerm()); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index b3693415b183..260e2cd17c10 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -202,7 +202,7 @@ public void testDoubleBufferWithMixOfTransactions() throws Exception { Assert.assertEquals(lastAppliedIndex, omTransactionInfo.getTransactionIndex()); - Assert.assertEquals(term, omTransactionInfo.getCurrentTerm()); + Assert.assertEquals(term, omTransactionInfo.getTerm()); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java new file mode 100644 index 000000000000..6137444b2296 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.hadoop.ozone.om.request; + +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import static org.apache.hadoop.ozone.om.request.OMClientRequest.validateAndNormalizeKey; +import static org.junit.Assert.fail; + +/** + * Class to test normalize paths. + */ +public class TestNormalizePaths { + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + @Test + public void testNormalizePathsEnabled() throws Exception { + + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "a/b/c/d")); + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "/a/b/c/d")); + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "////a/b/c/d")); + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "////a/b/////c/d")); + Assert.assertEquals("a/b/c/...../d", + validateAndNormalizeKey(true, "////a/b/////c/...../d")); + Assert.assertEquals("a/b/d", + validateAndNormalizeKey(true, "/a/b/c/../d")); + Assert.assertEquals("a", + validateAndNormalizeKey(true, "a")); + Assert.assertEquals("a/b", + validateAndNormalizeKey(true, "/a/./b")); + Assert.assertEquals("a/b", + validateAndNormalizeKey(true, ".//a/./b")); + Assert.assertEquals("a/", + validateAndNormalizeKey(true, "/a/.")); + Assert.assertEquals("b/c", + validateAndNormalizeKey(true, "//./b/c/")); + Assert.assertEquals("a/b/c/d", + validateAndNormalizeKey(true, "a/b/c/d/")); + Assert.assertEquals("a/b/c/...../d", + validateAndNormalizeKey(true, "////a/b/////c/...../d/")); + } + + @Test + public void testNormalizeKeyInvalidPaths() throws OMException { + checkInvalidPath("/a/b/c/../../../../../d"); + checkInvalidPath("../a/b/c/"); + checkInvalidPath("/../..a/b/c/"); + checkInvalidPath("//"); + checkInvalidPath("/////"); + checkInvalidPath(""); + checkInvalidPath("/"); + checkInvalidPath("/:/:"); + } + + private void checkInvalidPath(String keyName) { + try { + validateAndNormalizeKey(true, keyName); + fail("checkInvalidPath failed for path " + keyName); + } catch (OMException ex) { + Assert.assertTrue(ex.getMessage().contains("Invalid KeyPath")); + } + } + + + + @Test + public void testNormalizePathsDisable() throws OMException { + + Assert.assertEquals("/a/b/c/d", + validateAndNormalizeKey(false, "/a/b/c/d")); + Assert.assertEquals("////a/b/c/d", + validateAndNormalizeKey(false, "////a/b/c/d")); + Assert.assertEquals("////a/b/////c/d", + validateAndNormalizeKey(false, "////a/b/////c/d")); + Assert.assertEquals("////a/b/////c/...../d", + validateAndNormalizeKey(false, "////a/b/////c/...../d")); + Assert.assertEquals("/a/b/c/../d", + validateAndNormalizeKey(false, "/a/b/c/../d")); + Assert.assertEquals("/a/b/c/../../d", + validateAndNormalizeKey(false, "/a/b/c/../../d")); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java index dadeacb1943c..25268ff64194 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -136,6 +136,26 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor, trxnLogIndex); + 
addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, + omMetadataManager); + + } + + /** + * Add key entry to KeyTable. if openKeyTable flag is true, add's entries + * to openKeyTable, else add's it to keyTable. + * @throws Exception + */ + public static void addKeyToTable(boolean openKeyTable, boolean addToCache, + OmKeyInfo omKeyInfo, long clientID, + long trxnLogIndex, + OMMetadataManager omMetadataManager) + throws Exception { + + String volumeName = omKeyInfo.getVolumeName(); + String bucketName = omKeyInfo.getBucketName(); + String keyName = omKeyInfo.getKeyName(); + if (openKeyTable) { String ozoneKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, clientID); @@ -213,13 +233,24 @@ public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, String keyName, HddsProtos.ReplicationType replicationType, HddsProtos.ReplicationFactor replicationFactor, long objectID) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, + replicationFactor, objectID, Time.now()); + } + + /** + * Create OmKeyInfo. + */ + public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, + String keyName, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID, + long creationTime) { return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(Time.now()) + .setCreationTime(creationTime) .setModificationTime(Time.now()) .setDataSize(1000L) .setReplicationType(replicationType) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java index 7bef6b8957ab..06e140b2f55a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java @@ -219,32 +219,4 @@ public static void addCreateVolumeToTable(String volumeName, .setOwnerName(UUID.randomUUID().toString()).build(); TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OMRequest originalRequest = TestOMRequestUtils.createBucketRequest( - bucketName, volumeName, false, StorageTypeProto.SSD); - OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest( - originalRequest); - - // Manually add volume to DB table - addCreateVolumeToTable(volumeName, omMetadataManager); - - // Execute the original request - omBucketCreateRequest.preExecute(ozoneManager); - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index f99e1b6114ea..1037baa8eaf9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -105,46 +105,4 @@ private OMRequest createDeleteBucketRequest(String volumeName, .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) .setClientId(UUID.randomUUID().toString()).build(); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - // CreateBucket request - OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest( - TestOMRequestUtils.createBucketRequest(bucketName, volumeName, - false, OzoneManagerProtocolProtos.StorageTypeProto.SSD)); - - // Create volume entry in DB - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); - - // Execute CreateBucket request - omBucketCreateRequest.preExecute(ozoneManager); - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - // Execute the original DeleteBucket request - OMRequest omRequest = createDeleteBucketRequest(volumeName, bucketName); - OMBucketDeleteRequest omBucketDeleteRequest = new OMBucketDeleteRequest( - omRequest); - omBucketDeleteRequest.preExecute(ozoneManager); - omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 4, - ozoneManagerDoubleBufferHelper); - - // Create the bucket again - omBucketCreateRequest.preExecute(ozoneManager); - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 10, - ozoneManagerDoubleBufferHelper); - - // Replay the delete transaction - Execute the same request again - OMClientResponse omClientResponse = - omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 4, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java index 0670c3ed8340..cb0468ec757f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -119,33 +119,4 @@ private OMRequest createSetBucketPropertyRequest(String volumeName, .setCmdType(OzoneManagerProtocolProtos.Type.SetBucketProperty) .setClientId(UUID.randomUUID().toString()).build(); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - // Create request to enable versioning - OMRequest omRequest = createSetBucketPropertyRequest(volumeName, - bucketName, true); - OMBucketSetPropertyRequest omBucketSetPropertyRequest = - new OMBucketSetPropertyRequest(omRequest); - - // 
Execute the original request - omBucketSetPropertyRequest.preExecute(ozoneManager); - omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = omBucketSetPropertyRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index b714375249e2..c09bf8651e70 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -21,7 +21,9 @@ import java.util.UUID; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -86,6 +88,8 @@ public void setup() throws Exception { auditLogger = Mockito.mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + when(ozoneManager.resolveBucketLink(any(KeyArgs.class))) + .thenReturn(new ResolvedBucket(Pair.of("", ""), Pair.of("", ""))); } @After diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 7b6191c16192..c7aa6be9aa37 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -38,8 +38,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; -import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addKeyToTable; -import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS; @@ -401,31 +399,4 @@ private OMRequest createFileRequest( .setCreateFileRequest(createFileRequest).build(); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - OMRequest originalRequest = createFileRequest(volumeName, bucketName, - keyName, replicationFactor, replicationType, false, false); - OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest( - 
originalRequest); - - // Manually add volume, bucket and key to DB table - addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - addKeyToTable(false, false, volumeName, bucketName, keyName, clientID, - replicationType, replicationFactor, 1L, omMetadataManager); - - // Replay the transaction - Execute the createFile request again - OMClientResponse omClientResponse = - omFileCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java index 5228c5a9516c..5f704d357b0b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java @@ -36,7 +36,7 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest { @Test - public void testReplayRequest() throws Exception { + public void testAclRequest() throws Exception { // Manually add volume, bucket and key to DB TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); @@ -59,13 +59,6 @@ public void testReplayRequest() throws Exception { Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); - // Replay the original request - OMClientResponse replayResponse = omKeyAddAclRequest - .validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index f18ca8281677..b327b76e5136 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -207,85 +207,6 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { Assert.assertNull(omKeyInfo); } - @Test - public void testReplayRequest() throws Exception { - - // Manually add Volume, Bucket to DB - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - // Manually add Key to OpenKey table in DB - TestOMRequestUtils.addKeyToTable(true, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); - - OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest()); - - OMKeyCommitRequest omKeyCommitRequest = new OMKeyCommitRequest( - modifiedOmRequest); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - // Key should not be there in key table, as validateAndUpdateCache is - // still not called. 
- OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - Assert.assertNull(omKeyInfo); - - // Execute original KeyCommit request - omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 10L, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the createKey request again - OMClientResponse replayResponse = omKeyCommitRequest.validateAndUpdateCache( - ozoneManager, 10L, ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } - - @Test - public void testReplayRequestDeletesOpenKeyEntry() throws Exception { - - // Manually add Volume, Bucket to DB - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - // Manually add Key to OpenKey table in DB - TestOMRequestUtils.addKeyToTable(true, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); - - OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest()); - OMKeyCommitRequest omKeyCommitRequest = new OMKeyCommitRequest( - modifiedOmRequest); - - // Execute original KeyCommit request - omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 10L, - ozoneManagerDoubleBufferHelper); - - // Replay the Key Create request - add Key to OpenKey table manually again - TestOMRequestUtils.addKeyToTable(true, true, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); - - // Key should be present in OpenKey table - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); - OmKeyInfo openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - Assert.assertNotNull(openKeyInfo); - - // Replay the transaction - Execute the createKey request again - OMClientResponse replayResponse = omKeyCommitRequest.validateAndUpdateCache( - ozoneManager, 10L, ozoneManagerDoubleBufferHelper); - - // Replay should result in DELETE_OPEN_KEY_ONLY response and delete the - // key from OpenKey table - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - replayResponse.getOMResponse().getStatus()); - openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - Assert.assertNull(openKeyInfo); - } - /** * This method calls preExecute and verify the modified request. 
* @param originalOMRequest diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 7e9e09386fd5..2b8ffce49958 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -18,10 +18,15 @@ package org.apache.hadoop.ozone.om.request.key; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.junit.Assert; import org.junit.Test; @@ -37,8 +42,14 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; -import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addKeyToTable; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.when; /** * Tests OMCreateKeyRequest class. @@ -83,7 +94,7 @@ public void testValidateAndUpdateCache() throws Exception { omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L, ozoneManagerDoubleBufferHelper); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + Assert.assertEquals(OK, omKeyCreateResponse.getOMResponse().getStatus()); // Check open table whether key is added or not. 
@@ -311,6 +322,11 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { @SuppressWarnings("parameterNumber") private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) { + return createKeyRequest(isMultipartKey, partNumber, keyName); + } + + private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, + String keyName) { KeyArgs.Builder keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) @@ -328,48 +344,170 @@ private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) { .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) .setClientId(UUID.randomUUID().toString()) .setCreateKeyRequest(createKeyRequest).build(); - } @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setFactor(replicationFactor) - .setType(replicationType) - .build(); - - CreateKeyRequest.Builder req = CreateKeyRequest.newBuilder() - .setKeyArgs(keyArgs); - OMRequest originalRequest = OMRequest.newBuilder() - .setCreateKeyRequest(req) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) - .setClientId(UUID.randomUUID().toString()) - .build(); + public void testKeyCreateWithFileSystemPathsEnabled() throws Exception { + + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + when(ozoneManager.getConfiguration()).thenReturn(configuration); + when(ozoneManager.getEnableFileSystemPaths()).thenReturn(true); + + // Add volume and bucket entries to DB. + addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + + keyName = "dir1/dir2/dir3/file1"; + createAndCheck(keyName); + + // Key with leading '/'. + String keyName = "/a/b/c/file1"; + createAndCheck(keyName); + + // Commit openKey entry. + TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + keyName.substring(1), 0L, RATIS, THREE, omMetadataManager); + + // Now create another file in same dir path. + keyName = "/a/b/c/file2"; + createAndCheck(keyName); + + // Create key with multiple /'s + // converted to a/b/c/file5 + keyName = "///a/b///c///file5"; + createAndCheck(keyName); + + // converted to a/b/c/.../file3 + keyName = "///a/b///c//.../file3"; + createAndCheck(keyName); + + // converted to r1/r2 + keyName = "././r1/r2/"; + createAndCheck(keyName); + + // converted to ..d1/d2/d3 + keyName = "..d1/d2/d3/"; + createAndCheck(keyName); + + // Create a file, where a file already exists in the path. + // Now try with a file exists in path. Should fail. + keyName = "/a/b/c/file1/file3"; + checkNotAFile(keyName); + + // Empty keyName. 
+ keyName = ""; + checkNotAValidPath(keyName); + + // Key name ends with / + keyName = "/a/./"; + checkNotAValidPath(keyName); + + keyName = "/////"; + checkNotAValidPath(keyName); + + keyName = "../../b/c"; + checkNotAValidPath(keyName); + + keyName = "../../b/c/"; + checkNotAValidPath(keyName); + + keyName = "../../b:/c/"; + checkNotAValidPath(keyName); + + keyName = ":/c/"; + checkNotAValidPath(keyName); + + keyName = ""; + checkNotAValidPath(keyName); + + keyName = "../a/b"; + checkNotAValidPath(keyName); + + keyName = "/../a/b"; + checkNotAValidPath(keyName); + + } + + + private void checkNotAValidPath(String keyName) { + OMRequest omRequest = createKeyRequest(false, 0, keyName); + OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest); + + try { + omKeyCreateRequest.preExecute(ozoneManager); + fail("checkNotAValidPath failed for path" + keyName); + } catch (IOException ex) { + Assert.assertTrue(ex instanceof OMException); + OMException omException = (OMException) ex; + Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, + omException.getResult()); + } + + + } + private void checkNotAFile(String keyName) throws Exception { + OMRequest omRequest = createKeyRequest(false, 0, keyName); - OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest( - originalRequest); + OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest); - // Manually add volume, bucket and key to DB table - addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - addKeyToTable(false, false, volumeName, bucketName, keyName, clientID, - replicationType, replicationFactor, 1L, omMetadataManager); + omRequest = omKeyCreateRequest.preExecute(ozoneManager); + + omKeyCreateRequest = new OMKeyCreateRequest(omRequest); - // Replay the transaction - Execute the createKey request again OMClientResponse omClientResponse = - omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); + omKeyCreateRequest.validateAndUpdateCache(ozoneManager, + 101L, ozoneManagerDoubleBufferHelper); - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, + Assert.assertEquals(NOT_A_FILE, omClientResponse.getOMResponse().getStatus()); } + + private void createAndCheck(String keyName) throws Exception { + OMRequest omRequest = createKeyRequest(false, 0, keyName); + + OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest); + + omRequest = omKeyCreateRequest.preExecute(ozoneManager); + + omKeyCreateRequest = new OMKeyCreateRequest(omRequest); + + OMClientResponse omClientResponse = + omKeyCreateRequest.validateAndUpdateCache(ozoneManager, + 101L, ozoneManagerDoubleBufferHelper); + + Assert.assertEquals(OK, omClientResponse.getOMResponse().getStatus()); + + checkCreatedPaths(omKeyCreateRequest, omRequest, keyName); + } + + private void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest, + OMRequest omRequest, String keyName) throws Exception { + keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName); + // Check intermediate directories created or not. 
+ Path keyPath = Paths.get(keyName); + checkIntermediatePaths(keyPath); + + // Check open key entry + String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, + keyName, omRequest.getCreateKeyRequest().getClientID()); + OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); + Assert.assertNotNull(omKeyInfo); + } + + + + private void checkIntermediatePaths(Path keyPath) throws Exception { + // Check intermediate paths are created + keyPath = keyPath.getParent(); + while(keyPath != null) { + Assert.assertNotNull(omMetadataManager.getKeyTable().get( + omMetadataManager.getOzoneDirKey(volumeName, bucketName, + keyPath.toString()))); + keyPath = keyPath.getParent(); + } + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index b60d68ea675f..b8e560308077 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -136,43 +136,6 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { omClientResponse.getOMResponse().getStatus()); } - @Test - public void testReplayRequest() throws Exception { - OMRequest modifiedOmRequest = - doPreExecute(createDeleteKeyRequest()); - - OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); - - // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 1L, omMetadataManager); - - // Delete the key manually. Lets say the Delete Requests - // TransactionLogIndex is 10. - long deleteTrxnLogIndex = 10L; - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - TestOMRequestUtils.deleteKey(ozoneKey, omMetadataManager, 10L); - - // Create the same key again with TransactionLogIndex > Delete requests - // TransactionLogIndex - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 20L, omMetadataManager); - - // Replay the original DeleteRequest. - OMClientResponse omClientResponse = omKeyDeleteRequest - .validateAndUpdateCache(ozoneManager, deleteTrxnLogIndex, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } - /** * This method calls preExecute and verify the modified request. 
* @param originalOmRequest diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index 10b45ad54a02..31e6975775e4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -27,7 +27,6 @@ import org.junit.Test; import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -154,120 +153,4 @@ public void testValidateAndUpdateCache() throws Exception { deletedKey)); } } - - @Test - public void testPurgeKeysAcrossBuckets() throws Exception { - String bucket1 = bucketName; - String bucket2 = UUID.randomUUID().toString(); - - // bucket1 is created during setup. Create bucket2 manually. - TestOMRequestUtils.addBucketToDB(volumeName, bucket2, omMetadataManager); - - // Create and Delete keys in Bucket1 and Bucket2. - List deletedKeyInBucket1 = createAndDeleteKeys(1, bucket1); - List deletedKeyInBucket2 = createAndDeleteKeys(1, bucket2); - List deletedKeyNames = new ArrayList<>(); - deletedKeyNames.addAll(deletedKeyInBucket1); - deletedKeyNames.addAll(deletedKeyInBucket2); - - // The keys should be present in the DeletedKeys table before purging - for (String deletedKey : deletedKeyNames) { - Assert.assertTrue(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - - // Create PurgeKeysRequest to purge the deleted keys - DeletedKeys deletedKeysInBucket1 = DeletedKeys.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucket1) - .addAllKeys(deletedKeyInBucket1) - .build(); - DeletedKeys deletedKeysInBucket2 = DeletedKeys.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucket2) - .addAllKeys(deletedKeyInBucket1) - .build(); - PurgeKeysRequest purgeKeysRequest = PurgeKeysRequest.newBuilder() - .addDeletedKeys(deletedKeysInBucket1) - .addDeletedKeys(deletedKeysInBucket2) - .build(); - - OMRequest omRequest = OMRequest.newBuilder() - .setPurgeKeysRequest(purgeKeysRequest) - .setCmdType(Type.PurgeKeys) - .setClientId(UUID.randomUUID().toString()) - .build(); - - OMRequest preExecutedRequest = preExecute(omRequest); - OMKeyPurgeRequest omKeyPurgeRequest = - new OMKeyPurgeRequest(preExecutedRequest); - - omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = OMResponse.newBuilder() - .setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance()) - .setCmdType(Type.PurgeKeys) - .setStatus(Status.OK) - .build(); - - BatchOperation batchOperation = - omMetadataManager.getStore().initBatchOperation(); - - OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse( - omResponse, deletedKeyNames); - omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // The keys should not exist in the DeletedKeys table - for (String deletedKey : deletedKeyNames) { - Assert.assertFalse(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - } - - @Test - public void testReplayRequest() throws Exception { - - // Create and Delete keys. The keys should be moved to DeletedKeys table - Integer trxnLogIndex = new Integer(1); - List deletedKeyNames = createAndDeleteKeys(trxnLogIndex, null); - int purgeRequestTrxnLogIndex = ++trxnLogIndex; - - // The keys should be present in the DeletedKeys table before purging - for (String deletedKey : deletedKeyNames) { - Assert.assertTrue(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - - // Execute PurgeKeys request to purge the keys from Deleted table. - // Create PurgeKeysRequest to replay the purge request - OMRequest omRequest = createPurgeKeysRequest(deletedKeyNames); - OMRequest preExecutedRequest = preExecute(omRequest); - OMKeyPurgeRequest omKeyPurgeRequest = - new OMKeyPurgeRequest(preExecutedRequest); - OMClientResponse omClientResponse = omKeyPurgeRequest - .validateAndUpdateCache(ozoneManager, purgeRequestTrxnLogIndex, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus().equals( - Status.OK)); - - // Create and delete the same keys again - createAndDeleteKeys(++trxnLogIndex, null); - - // Replay the PurgeKeys request. It should not purge the keys deleted - // after the original request was played. - OMClientResponse replayResponse = omKeyPurgeRequest - .validateAndUpdateCache(ozoneManager, purgeRequestTrxnLogIndex, - ozoneManagerDoubleBufferHelper); - - // Verify that the new deletedKeys exist in the DeletedKeys table - for (String deletedKey : deletedKeyNames) { - Assert.assertTrue(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java index eb79d7a6346d..fc7f9b85787e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java @@ -22,11 +22,9 @@ import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; @@ -199,106 +197,6 @@ public void testValidateAndUpdateCacheWithFromKeyInvalid() throws Exception { omKeyRenameResponse.getOMResponse().getStatus()); } - /** - * Test replay of RenameRequest when fromKey does not exist in DB. 
- */ - @Test - public void testReplayRequest() throws Exception { - String toKeyName = UUID.randomUUID().toString(); - OMRequest modifiedOmRequest = doPreExecute( - createRenameKeyRequest(toKeyName)); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); - - // Execute RenameRequest - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - OMClientResponse omKeyRenameResponse = - omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 10L, - ozoneManagerDoubleBufferHelper); - - // Commit Batch operation to add the transaction to DB - BatchOperation batchOperation = omMetadataManager.getStore() - .initBatchOperation(); - omKeyRenameResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // Replay the RenameRequest. - OMClientResponse replayResponse = omKeyRenameRequest.validateAndUpdateCache( - ozoneManager, 10L, ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } - - /** - * Test replay of RenameRequest when fromKey exists in DB. - */ - @Test - public void testReplayRequestWhenFromKeyExists() throws Exception { - - String toKeyName = UUID.randomUUID().toString(); - OMRequest modifiedOmRequest = doPreExecute( - createRenameKeyRequest(toKeyName)); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 1L, omMetadataManager); - - // Execute RenameRequest - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - OMClientResponse omKeyRenameResponse = omKeyRenameRequest - .validateAndUpdateCache(ozoneManager, 10L, - ozoneManagerDoubleBufferHelper); - - // Commit Batch operation to add the transaction to DB - BatchOperation batchOperation = omMetadataManager.getStore() - .initBatchOperation(); - omKeyRenameResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // Let's say the fromKey create transaction was also replayed. In this - // case, fromKey and toKey will both exist in the DB. Replaying the - // RenameRequest should then delete fromKey but not add toKey again. 
- - // Replay CreateKey request for fromKey - TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 1L, omMetadataManager); - - // Verify fromKey exists in DB - String fromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - OmKeyInfo dbFromKeyInfo = omMetadataManager.getKeyTable().get(fromKey); - Assert.assertNotNull(dbFromKeyInfo); - - // Replay original RenameRequest - OMKeyRenameResponse replayResponse = - (OMKeyRenameResponse) omKeyRenameRequest.validateAndUpdateCache( - ozoneManager, 10L, ozoneManagerDoubleBufferHelper); - - // This replay response should delete fromKey from DB - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - replayResponse.getOMResponse().getStatus()); - Assert.assertTrue(replayResponse.deleteFromKeyOnly()); - - // Commit response to DB - batchOperation = omMetadataManager.getStore().initBatchOperation(); - replayResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // Verify fromKey is deleted from DB - dbFromKeyInfo = omMetadataManager.getKeyTable().get(fromKey); - Assert.assertNull(dbFromKeyInfo); - } - /** * This method calls preExecute and verify the modified request. * @param originalOmRequest diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 49794a1ed6f9..cb35e2b9358d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -22,7 +22,12 @@ import java.util.List; import java.util.UUID; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -63,6 +68,7 @@ public class TestOMKeyRequest { public TemporaryFolder folder = new TemporaryFolder(); protected OzoneManager ozoneManager; + protected KeyManager keyManager; protected OMMetrics omMetrics; protected OMMetadataManager omMetadataManager; protected AuditLogger auditLogger; @@ -110,6 +116,8 @@ public void setup() throws Exception { ozoneBlockTokenSecretManager = Mockito.mock(OzoneBlockTokenSecretManager.class); scmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class); + keyManager = new KeyManagerImpl(ozoneManager, scmClient, ozoneConfiguration, + ""); when(ozoneManager.getScmClient()).thenReturn(scmClient); when(ozoneManager.getBlockTokenSecretManager()) .thenReturn(ozoneBlockTokenSecretManager); @@ -118,6 +126,7 @@ public void setup() throws Exception { when(ozoneManager.isGrpcBlockTokenEnabled()).thenReturn(false); when(ozoneManager.getOMNodeId()).thenReturn(UUID.randomUUID().toString()); when(scmClient.getBlockClient()).thenReturn(scmBlockLocationProtocol); + when(ozoneManager.getKeyManager()).thenReturn(keyManager); Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) @@ -150,6 +159,11 @@ public void setup() throws Exception { clientID = 
Time.now(); dataSize = 1000L; + Pair<String, String> volumeAndBucket = Pair.of(volumeName, bucketName); + when(ozoneManager.resolveBucketLink(any(KeyArgs.class))) + .thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket)); + when(ozoneManager.resolveBucketLink(any(Pair.class))) + .thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket)); } @After diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java new file mode 100644 index 000000000000..ac50af8bd6be --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; + +/** + * Class tests OMKeysDeleteRequest. + */ +public class TestOMKeysDeleteRequest extends TestOMKeyRequest { + + + private List<String> deleteKeyList; + private OMRequest omRequest; + + @Test + public void testKeysDeleteRequest() throws Exception { + + createPreRequisites(); + + OMKeysDeleteRequest omKeysDeleteRequest = + new OMKeysDeleteRequest(omRequest); + + OMClientResponse omClientResponse = + omKeysDeleteRequest.validateAndUpdateCache(ozoneManager, 0L, + ozoneManagerDoubleBufferHelper); + + Assert.assertTrue(omClientResponse.getOMResponse().getSuccess()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + Assert.assertTrue(omClientResponse.getOMResponse().getDeleteKeysResponse() + .getStatus()); + DeleteKeyArgs unDeletedKeys = + omClientResponse.getOMResponse().getDeleteKeysResponse() + .getUnDeletedKeys(); + Assert.assertEquals(0, + unDeletedKeys.getKeysCount()); + + // Check all keys are deleted. + for (String deleteKey : deleteKeyList) { + Assert.assertNull(omMetadataManager.getKeyTable() + .get(omMetadataManager.getOzoneKey(volumeName, bucketName, + deleteKey))); + } + + } + + @Test + public void testKeysDeleteRequestFail() throws Exception { + + createPreRequisites(); + + // Add a key which does not exist; this causes the batch delete to fail. + + omRequest = omRequest.toBuilder() + .setDeleteKeysRequest(DeleteKeysRequest.newBuilder() + .setDeleteKeys(DeleteKeyArgs.newBuilder() + .setBucketName(bucketName).setVolumeName(volumeName) + .addAllKeys(deleteKeyList).addKeys("dummy"))).build(); + + OMKeysDeleteRequest omKeysDeleteRequest = + new OMKeysDeleteRequest(omRequest); + + OMClientResponse omClientResponse = + omKeysDeleteRequest.validateAndUpdateCache(ozoneManager, 0L, + ozoneManagerDoubleBufferHelper); + + Assert.assertFalse(omClientResponse.getOMResponse().getSuccess()); + Assert.assertEquals(PARTIAL_DELETE, + omClientResponse.getOMResponse().getStatus()); + + Assert.assertFalse(omClientResponse.getOMResponse().getDeleteKeysResponse() + .getStatus()); + + // Check that the existing keys are deleted and check the unDeletedKeys in the response.
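+ // With PARTIAL_DELETE the keys that do exist are still removed from the key table; only the missing "dummy" key is expected to be reported back in unDeletedKeys.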
+ for (String deleteKey : deleteKeyList) { + Assert.assertNull(omMetadataManager.getKeyTable() + .get(omMetadataManager.getOzoneKey(volumeName, bucketName, + deleteKey))); + } + + DeleteKeyArgs unDeletedKeys = omClientResponse.getOMResponse() + .getDeleteKeysResponse().getUnDeletedKeys(); + Assert.assertEquals(1, + unDeletedKeys.getKeysCount()); + Assert.assertEquals("dummy", unDeletedKeys.getKeys(0)); + + } + + private void createPreRequisites() throws Exception { + + deleteKeyList = new ArrayList<>(); + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + int count = 10; + + DeleteKeyArgs.Builder deleteKeyArgs = DeleteKeyArgs.newBuilder() + .setBucketName(bucketName).setVolumeName(volumeName); + + // Create 10 keys + String parentDir = "/user"; + String key = ""; + + + for (int i = 0; i < count; i++) { + key = parentDir.concat("/key" + i); + TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, + parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, omMetadataManager); + deleteKeyArgs.addKeys(key); + deleteKeyList.add(key); + } + + omRequest = + OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) + .setCmdType(DeleteKeys) + .setDeleteKeysRequest(DeleteKeysRequest.newBuilder() + .setDeleteKeys(deleteKeyArgs).build()).build(); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java new file mode 100644 index 000000000000..947590660a84 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +/** + * Tests RenameKeys request. + */ +public class TestOMKeysRenameRequest extends TestOMKeyRequest { + + private int count = 10; + private String parentDir = "/test"; + + @Test + public void testKeysRenameRequest() throws Exception { + + OMRequest modifiedOmRequest = createRenameKeyRequest(false); + + OMKeysRenameRequest omKeysRenameRequest = + new OMKeysRenameRequest(modifiedOmRequest); + + OMClientResponse omKeysRenameResponse = + omKeysRenameRequest.validateAndUpdateCache(ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + + Assert.assertTrue(omKeysRenameResponse.getOMResponse().getSuccess()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omKeysRenameResponse.getOMResponse().getStatus()); + + for (int i = 0; i < count; i++) { + // Original key should be deleted, toKey should exist. + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get( + omMetadataManager.getOzoneKey(volumeName, bucketName, + parentDir.concat("/key" + i))); + Assert.assertNull(omKeyInfo); + + omKeyInfo = + omMetadataManager.getKeyTable().get(omMetadataManager.getOzoneKey( + volumeName, bucketName, parentDir.concat("/newKey" + i))); + Assert.assertNotNull(omKeyInfo); + } + + } + + @Test + public void testKeysRenameRequestFail() throws Exception { + OMRequest modifiedOmRequest = createRenameKeyRequest(true); + + OMKeysRenameRequest omKeysRenameRequest = + new OMKeysRenameRequest(modifiedOmRequest); + + OMClientResponse omKeysRenameResponse = + omKeysRenameRequest.validateAndUpdateCache(ozoneManager, 100L, + ozoneManagerDoubleBufferHelper); + + Assert.assertFalse(omKeysRenameResponse.getOMResponse().getSuccess()); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.PARTIAL_RENAME, + omKeysRenameResponse.getOMResponse().getStatus()); + + // The keys (key0 to key9) should still be renamed successfully. + for (int i = 0; i < count; i++) { + // Original key should be deleted, toKey should exist. + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get( + omMetadataManager.getOzoneKey(volumeName, bucketName, + parentDir.concat("/key" + i))); + Assert.assertNull(omKeyInfo); + + omKeyInfo = + omMetadataManager.getKeyTable().get(omMetadataManager.getOzoneKey( + volumeName, bucketName, parentDir.concat("/newKey" + i))); + Assert.assertNotNull(omKeyInfo); + } + + // The key that was not renamed should be in unRenamedKeys.
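+ // Only the missing source key ("testKey") is expected in unRenamedKeys; keys that were renamed successfully are not reported back.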
+ RenameKeysMap unRenamedKeys = omKeysRenameResponse.getOMResponse() + .getRenameKeysResponse().getUnRenamedKeys(0); + Assert.assertEquals("testKey", unRenamedKeys.getFromKeyName()); + } + + /** + * Create OMRequest which encapsulates RenameKeyRequest. + * + * @return OMRequest + */ + private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { + + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + List renameKeyList = new ArrayList<>(); + + for (int i = 0; i < count; i++) { + String key = parentDir.concat("/key" + i); + String toKey = parentDir.concat("/newKey" + i); + TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, + parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, omMetadataManager); + + RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() + .setFromKeyName(key) + .setToKeyName(toKey); + renameKeyList.add(renameKey.build()); + } + + + // Generating illegal data causes Rename Keys to fail. + if (isIllegal) { + RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() + .setFromKeyName("testKey") + .setToKeyName("toKey"); + renameKeyList.add(renameKey.build()); + } + + RenameKeysArgs.Builder renameKeyArgs = RenameKeysArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .addAllRenameKeysMap(renameKeyList); + + RenameKeysRequest.Builder renameKeysReq = RenameKeysRequest.newBuilder() + .setRenameKeysArgs(renameKeyArgs.build()); + + return OMRequest.newBuilder() + .setClientId(UUID.randomUUID().toString()) + .setRenameKeysRequest(renameKeysReq.build()) + .setCmdType(OzoneManagerProtocolProtos.Type.RenameKeys).build(); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index c25ee7b5baa1..5690ff20cb65 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -40,7 +40,7 @@ public class TestOMPrefixAclRequest extends TestOMKeyRequest { @Test - public void testReplayRequest() throws Exception { + public void testAclRequest() throws Exception { PrefixManager prefixManager = new PrefixManagerImpl( ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); @@ -66,16 +66,9 @@ public void testReplayRequest() throws Exception { ozoneManagerDoubleBufferHelper); Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); - - // Replay the original request - OMClientResponse replayResponse = omKeyPrefixAclRequest - .validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); } + /** * Create OMRequest which encapsulates OMKeyAddAclRequest. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index 1d785609b566..33fd1cd215b2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -21,6 +21,14 @@ import java.util.UUID; +import com.google.common.base.Optional; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; @@ -28,6 +36,12 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.mockito.Mockito; + +import static org.apache.hadoop.crypto.CipherSuite.AES_CTR_NOPADDING; +import static org.apache.hadoop.crypto.CryptoProtocolVersion.ENCRYPTION_ZONES; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_SUPPORTED_OPERATION; +import static org.mockito.Mockito.when; /** * Tests S3 Initiate Multipart Upload request. @@ -36,7 +50,7 @@ public class TestS3InitiateMultipartUploadRequest extends TestS3MultipartRequest { @Test - public void testPreExecute() { + public void testPreExecute() throws Exception { doPreExecuteInitiateMPU(UUID.randomUUID().toString(), UUID.randomUUID().toString(), UUID.randomUUID().toString()); } @@ -150,4 +164,47 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { .get(multipartKey)); } + + @Test + public void testMPUNotSupported() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + + when(ozoneManager.getKmsProvider()) + .thenReturn(Mockito.mock(KeyProviderCryptoExtension.class)); + + TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + + // Set encryption info and create bucket + OmBucketInfo omBucketInfo = + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName).setCreationTime(Time.now()) + .setBucketEncryptionKey(new BucketEncryptionKeyInfo.Builder() + .setKeyName("dummy").setSuite(AES_CTR_NOPADDING) + .setVersion(ENCRYPTION_ZONES).build()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + + omMetadataManager.getBucketTable().put(bucketKey, omBucketInfo); + + omMetadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), + new CacheValue<>(Optional.of(omBucketInfo), 100L)); + + OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName, + keyName); + + OMClientRequest omClientRequest = + new S3InitiateMultipartUploadRequest(modifiedRequest); + + OMClientResponse omClientResponse = + omClientRequest.validateAndUpdateCache(ozoneManager, 1L, + ozoneManagerDoubleBufferHelper); + + 
Assert.assertNotNull(omClientResponse.getOMResponse()); + Assert.assertEquals(NOT_SUPPORTED_OPERATION, + omClientResponse.getOMResponse().getStatus()); + + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index 99500274628f..f0f040f39479 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -29,6 +29,7 @@ import org.junit.rules.TemporaryFolder; import org.mockito.Mockito; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.AuditMessage; @@ -37,6 +38,8 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; @@ -79,6 +82,13 @@ public void setup() throws Exception { auditLogger = Mockito.mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + when(ozoneManager.resolveBucketLink(any(KeyArgs.class))) + .thenAnswer(inv -> { + KeyArgs args = (KeyArgs) inv.getArguments()[0]; + return new ResolvedBucket( + Pair.of(args.getVolumeName(), args.getBucketName()), + Pair.of(args.getVolumeName(), args.getBucketName())); + }); } @@ -97,7 +107,7 @@ public void stop() { * @return OMRequest - returned from preExecute. 
*/ protected OMRequest doPreExecuteInitiateMPU( - String volumeName, String bucketName, String keyName) { + String volumeName, String bucketName, String keyName) throws Exception { OMRequest omRequest = TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, keyName); @@ -131,7 +141,8 @@ protected OMRequest doPreExecuteInitiateMPU( */ protected OMRequest doPreExecuteCommitMPU( String volumeName, String bucketName, String keyName, - long clientID, String multipartUploadID, int partNumber) { + long clientID, String multipartUploadID, int partNumber) + throws Exception { // Just set dummy size long dataSize = 100L; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index 5b220bf4c874..d623b17dcfe1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -38,7 +38,7 @@ public class TestS3MultipartUploadCommitPartRequest extends TestS3MultipartRequest { @Test - public void testPreExecute() { + public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java index 27973ed70d83..4ac1f494b009 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java @@ -244,30 +244,4 @@ private void verifyRequest(OMRequest modifiedRequest, Assert.assertNotEquals(original.getModificationTime(), updated.getModificationTime()); } - - @Test - public void testReplayRequest() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String adminName = "user1"; - String ownerName = "user1"; - OMRequest originalRequest = createVolumeRequest(volumeName, adminName, - ownerName); - OMVolumeCreateRequest omVolumeCreateRequest = - new OMVolumeCreateRequest(originalRequest); - - // Execute the original request - omVolumeCreateRequest.preExecute(ozoneManager); - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java index 709f82149227..49f28d3ef9b1 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java @@ -157,42 +157,4 @@ private OMRequest deleteVolumeRequest(String volumeName) { .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) .setDeleteVolumeRequest(deleteVolumeRequest).build(); } - - @Test - public void testReplayRequest() throws Exception { - - // create volume request - String volumeName = UUID.randomUUID().toString(); - String user = "user1"; - OMVolumeCreateRequest omVolumeCreateRequest = new OMVolumeCreateRequest( - createVolumeRequest(volumeName, user, user)); - - // Execute createVolume request - omVolumeCreateRequest.preExecute(ozoneManager); - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMRequest originalDeleteRequest = deleteVolumeRequest(volumeName); - OMVolumeDeleteRequest omVolumeDeleteRequest = - new OMVolumeDeleteRequest(originalDeleteRequest); - - // Execute the original request - omVolumeDeleteRequest.preExecute(ozoneManager); - omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - // Create the volume again - omVolumeCreateRequest.preExecute(ozoneManager); - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 3, - ozoneManagerDoubleBufferHelper); - - // Replay the delete transaction - Execute the same request again - OMClientResponse omClientResponse = - omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java index 0e1ac5475277..4ccf195de42e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java @@ -169,37 +169,6 @@ public void testInvalidRequest() throws Exception { omResponse.getStatus()); } - @Test - public void testReplayRequest() throws Exception { - // create volume - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - // create request to set new owner - String newOwnerName = "user2"; - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, - newOwnerName); - OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = - new OMVolumeSetOwnerRequest(originalRequest); - - // Execute the original request - omVolumeSetOwnerRequest.preExecute(ozoneManager); - omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = - omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - 
omClientResponse.getOMResponse().getStatus()); - } - @Test public void testOwnSameVolumeTwice() throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java index bd90222dc32e..4d78ef0a37c6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -150,34 +150,4 @@ public void testInvalidRequest() throws Exception { Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST, omResponse.getStatus()); } - - @Test - public void testReplayRequest() throws Exception { - // create volume - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - // create request with quota set. - long quota = 100L; - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota); - OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = - new OMVolumeSetQuotaRequest(originalRequest); - - // Execute the original request - omVolumeSetQuotaRequest.preExecute(ozoneManager); - omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay the transaction - Execute the same request again - OMClientResponse omClientResponse = - omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // Replay should result in Replay response - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - omClientResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java index 8c79e029fe18..66a122f298d5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java @@ -119,37 +119,4 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, omResponse.getStatus()); } - - @Test - public void testReplayRequest() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); - - OMRequest originalRequest = TestOMRequestUtils.createVolumeAddAclRequest( - volumeName, acl); - - OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest( - originalRequest); - omVolumeAddAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = omVolumeAddAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - // Replay the 
original request - OMClientResponse replayResponse = omVolumeAddAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java index b1bbf13e0664..b2eb0bf99b6d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java @@ -129,48 +129,4 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, omResponse.getStatus()); } - - @Test - public void testReplayRequest() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); - - // add acl first - OMRequest addAclRequest = TestOMRequestUtils.createVolumeAddAclRequest( - volumeName, acl); - OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest( - addAclRequest); - omVolumeAddAclRequest.preExecute(ozoneManager); - OMClientResponse addAclResponse = omVolumeAddAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - addAclResponse.getOMResponse().getStatus()); - - // remove acl - OMRequest removeAclRequest = TestOMRequestUtils - .createVolumeRemoveAclRequest(volumeName, acl); - OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = - new OMVolumeRemoveAclRequest(removeAclRequest); - omVolumeRemoveAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = omVolumeRemoveAclRequest - .validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - // Replay the original request - OMClientResponse replayResponse = omVolumeRemoveAclRequest - .validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java index 6d0f2b13ecbe..087ba713f6cf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java @@ -132,39 +132,4 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, omResponse.getStatus()); } - - @Test - public void testReplayRequest() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = 
"user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl userAccessAcl = OzoneAcl.parseAcl("user:bilbo:rw[ACCESS]"); - OzoneAcl groupDefaultAcl = OzoneAcl.parseAcl( - "group:admin:rwdlncxy[DEFAULT]"); - - List acls = Lists.newArrayList(userAccessAcl, groupDefaultAcl); - - OMRequest originalRequest = TestOMRequestUtils.createVolumeSetAclRequest( - volumeName, acls); - - OMVolumeSetAclRequest omVolumeSetAclRequest = new OMVolumeSetAclRequest( - originalRequest); - omVolumeSetAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = omVolumeSetAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - OMClientResponse replayResponse = omVolumeSetAclRequest - .validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, - replayResponse.getOMResponse().getStatus()); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java index cca0dad28616..fbd3af0b44a3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; @@ -36,6 +37,7 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import java.util.ArrayList; import java.util.UUID; /** @@ -75,7 +77,8 @@ public void testAddToDBBatch() throws Exception { .build(); OMDirectoryCreateResponse omDirectoryCreateResponse = - new OMDirectoryCreateResponse(omResponse, omKeyInfo, null); + new OMDirectoryCreateResponse(omResponse, omKeyInfo, + new ArrayList<>(), Result.SUCCESS); omDirectoryCreateResponse.addToDBBatch(omMetadataManager, batchOperation); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index f8b0a17dc3a0..b2626da18905 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -152,7 +152,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); - omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); + omKeyDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation); // Do manual commit and see whether addToBatch is successful or not. 
omMetadataManager.getStore().commitBatchOperation(batchOperation); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java new file mode 100644 index 000000000000..c5dd96b05931 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; + +/** + * Class to test OMKeysDeleteResponse. + */ +public class TestOMKeysDeleteResponse extends TestOMKeyResponse { + + + private List<OmKeyInfo> omKeyInfoList; + private List<String> ozoneKeys; + + + private void createPreRequisities() throws Exception { + String parent = "/user"; + String key = "key"; + + omKeyInfoList = new ArrayList<>(); + ozoneKeys = new ArrayList<>(); + String ozoneKey = ""; + for (int i = 0; i < 10; i++) { + keyName = parent.concat(key + i); + TestOMRequestUtils.addKeyToTable(false, volumeName, + bucketName, keyName, 0L, RATIS, THREE, omMetadataManager); + ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + omKeyInfoList.add(omMetadataManager.getKeyTable().get(ozoneKey)); + ozoneKeys.add(ozoneKey); + } + } + + @Test + public void testKeysDeleteResponse() throws Exception { + + createPreRequisities(); + + OMResponse omResponse = + OMResponse.newBuilder().setCmdType(DeleteKeys).setStatus(OK) + .setSuccess(true) + .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() + .setStatus(true)).build(); + OMClientResponse omKeysDeleteResponse = + new OMKeysDeleteResponse(omResponse, omKeyInfoList, 10L, true); + + omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + for (String ozKey : ozoneKeys) { + Assert.assertNull(omMetadataManager.getKeyTable().get(ozKey)); + + RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(ozKey); + Assert.assertNotNull(repeatedOmKeyInfo); + + Assert.assertEquals(1, repeatedOmKeyInfo.getOmKeyInfoList().size()); + Assert.assertEquals(10L, + repeatedOmKeyInfo.getOmKeyInfoList().get(0).getUpdateID()); + + } + + } + + @Test + public void testKeysDeleteResponseFail() throws Exception { + createPreRequisities(); + + OMResponse omResponse = + OMResponse.newBuilder().setCmdType(DeleteKeys).setStatus(KEY_NOT_FOUND) + .setSuccess(false) + .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() + .setStatus(false)).build(); + + + OMClientResponse omKeysDeleteResponse = + new OMKeysDeleteResponse(omResponse, omKeyInfoList, 10L, true); + + 
omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + + + for (String ozKey : ozoneKeys) { + Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozKey)); + + RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(ozKey); + Assert.assertNull(repeatedOmKeyInfo); + + } + + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java new file mode 100644 index 000000000000..a9db1b839758 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; + +/** + * Tests OMKeyRenameResponse. + */ +public class TestOMKeysRenameResponse extends TestOMKeyResponse { + private OmRenameKeys omRenameKeys; + private int count = 10; + private String parentDir = "/test"; + + @Test + public void testKeysRenameResponse() throws Exception { + + createPreRequisities(); + + OMResponse omResponse = OMResponse.newBuilder() + .setRenameKeysResponse(RenameKeysResponse.getDefaultInstance()) + .setStatus(Status.OK).setCmdType(Type.RenameKeys).build(); + + OMKeysRenameResponse omKeysRenameResponse = new OMKeysRenameResponse( + omResponse, omRenameKeys); + + omKeysRenameResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + for (int i = 0; i < count; i++) { + String key = parentDir.concat("/key" + i); + String toKey = parentDir.concat("/newKey" + i); + key = omMetadataManager.getOzoneKey(volumeName, bucketName, key); + toKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKey); + Assert.assertFalse(omMetadataManager.getKeyTable().isExist(key)); + Assert.assertTrue(omMetadataManager.getKeyTable().isExist(toKey)); + } + } + + @Test + public void testKeysRenameResponseFail() throws Exception { + + createPreRequisities(); + + OMResponse omResponse = OMResponse.newBuilder().setRenameKeysResponse( + RenameKeysResponse.getDefaultInstance()) + .setStatus(Status.KEY_NOT_FOUND) + .setCmdType(Type.RenameKeys) + .build(); + + OMKeysRenameResponse omKeyRenameResponse = new OMKeysRenameResponse( + omResponse, omRenameKeys); + + omKeyRenameResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + for (int i = 0; i < count; i++) { + String key = parentDir.concat("/key" + i); + String toKey = parentDir.concat("/newKey" + i); + key = omMetadataManager.getOzoneKey(volumeName, bucketName, key); + toKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKey); + // As omResponse has error, it is a no-op. So, no changes should happen. 
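+ // checkAndUpdateDB is expected to skip the batch update when the response status is not OK, so each source key should still exist and no destination key should appear.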
+ Assert.assertTrue(omMetadataManager.getKeyTable().isExist(key)); + Assert.assertFalse(omMetadataManager.getKeyTable().isExist(toKey)); + } + + } + + private void createPreRequisities() throws Exception { + + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + Map formAndToKeyInfo = new HashMap<>(); + + for (int i = 0; i < count; i++) { + String key = parentDir.concat("/key" + i); + String toKey = parentDir.concat("/newKey" + i); + TestOMRequestUtils.addKeyToTable(false, volumeName, + bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE, + omMetadataManager); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get( + omMetadataManager.getOzoneKey(volumeName, bucketName, key)); + omKeyInfo.setKeyName(toKey); + formAndToKeyInfo.put(key, omKeyInfo); + } + omRenameKeys = + new OmRenameKeys(volumeName, bucketName, null, formAndToKeyInfo); + + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java index 518953f91c62..391759a8df54 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.TestSSLFactory; import org.apache.hadoop.security.token.Token; @@ -327,4 +328,22 @@ public void testTokenSerialization() throws IOException { idDecode.readFields(in); Assert.assertEquals(idEncode, idDecode); } + + @Test + public void testTokenPersistence() throws IOException { + OzoneTokenIdentifier idWrite = getIdentifierInst(); + idWrite.setOmServiceId("defaultServiceId"); + + byte[] oldIdBytes = idWrite.getBytes(); + TokenIdentifierCodec idCodec = new TokenIdentifierCodec(); + + OzoneTokenIdentifier idRead = null; + try { + idRead = idCodec.fromPersistedFormat(oldIdBytes); + } catch (IOException ex) { + Assert.fail("Should not fail to load old token format"); + } + Assert.assertEquals("Deserialize Serialized Token should equal.", + idWrite, idRead); + } } \ No newline at end of file diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index d636b1f2302d..968262fa6cb4 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem-common Apache Hadoop Ozone FileSystem Common jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index a2f4c174b964..0b5098886f61 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -25,12 +25,12 @@ import java.util.Iterator; import java.util.List; -import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -176,7 +176,7 @@ public void close() throws IOException { @Override public InputStream readFile(String key) throws IOException { - incrementCounter(Statistic.OBJECTS_READ); + incrementCounter(Statistic.OBJECTS_READ, 1); try { return bucket.readFile(key).getInputStream(); } catch (OMException ex) { @@ -190,14 +190,14 @@ public InputStream readFile(String key) throws IOException { } } - protected void incrementCounter(Statistic objectsRead) { + protected void incrementCounter(Statistic objectsRead, long count) { //noop: Use OzoneClientAdapterImpl which supports statistics. } @Override public OzoneFSOutputStream createFile(String key, short replication, boolean overWrite, boolean recursive) throws IOException { - incrementCounter(Statistic.OBJECTS_CREATED); + incrementCounter(Statistic.OBJECTS_CREATED, 1); try { OzoneOutputStream ozoneOutputStream = null; if (replication == ReplicationFactor.ONE.getValue() @@ -224,7 +224,7 @@ public OzoneFSOutputStream createFile(String key, short replication, @Override public void renameKey(String key, String newKeyName) throws IOException { - incrementCounter(Statistic.OBJECTS_RENAMED); + incrementCounter(Statistic.OBJECTS_RENAMED, 1); bucket.renameKey(key, newKeyName); } @@ -242,7 +242,7 @@ public void rename(String pathStr, String newPath) throws IOException { @Override public boolean createDirectory(String keyName) throws IOException { LOG.trace("creating dir for key:{}", keyName); - incrementCounter(Statistic.OBJECTS_CREATED); + incrementCounter(Statistic.OBJECTS_CREATED, 1); try { bucket.createDirectory(keyName); } catch (OMException e) { @@ -264,7 +264,7 @@ public boolean createDirectory(String keyName) throws IOException { public boolean deleteObject(String keyName) { LOG.trace("issuing delete for key {}", keyName); try { - incrementCounter(Statistic.OBJECTS_DELETED); + incrementCounter(Statistic.OBJECTS_DELETED, 1); bucket.deleteKey(keyName); return true; } catch (IOException ioe) { @@ -281,9 +281,8 @@ public boolean deleteObject(String keyName) { */ @Override public boolean deleteObjects(List keyNameList) { - LOG.trace("issuing delete for key {}", keyNameList); try { - incrementCounter(Statistic.OBJECTS_DELETED); + incrementCounter(Statistic.OBJECTS_DELETED, keyNameList.size()); bucket.deleteKeys(keyNameList); return true; } catch (IOException ioe) { @@ -296,7 +295,7 @@ public FileStatusAdapter getFileStatus(String key, URI uri, Path qualifiedPath, String userName) throws IOException { try { - incrementCounter(Statistic.OBJECTS_QUERY); + incrementCounter(Statistic.OBJECTS_QUERY, 1); OzoneFileStatus status = bucket.getFileStatus(key); return toFileStatusAdapter(status, userName, uri, qualifiedPath); @@ -312,7 +311,7 @@ public FileStatusAdapter getFileStatus(String key, URI uri, @Override public Iterator listKeys(String pathKey) { - incrementCounter(Statistic.OBJECTS_LIST); + incrementCounter(Statistic.OBJECTS_LIST, 1); return new IteratorAdapter(bucket.listKeys(pathKey)); } @@ -320,7 +319,7 @@ public List listStatus(String keyName, boolean recursive, 
String startKey, long numEntries, URI uri, Path workingDir, String username) throws IOException { try { - incrementCounter(Statistic.OBJECTS_LIST); + incrementCounter(Statistic.OBJECTS_LIST, 1); List statuses = bucket .listStatus(keyName, recursive, startKey, numEntries); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index e723df7fdae5..e4acabc21443 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -26,11 +26,16 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -38,6 +43,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; @@ -205,7 +211,7 @@ public String getScheme() { @Override public FSDataInputStream open(Path f, int bufferSize) throws IOException { - incrementCounter(Statistic.INVOCATION_OPEN); + incrementCounter(Statistic.INVOCATION_OPEN, 1); statistics.incrementReadOps(1); LOG.trace("open() path:{}", f); final String key = pathToKey(f); @@ -218,7 +224,11 @@ protected InputStream createFSInputStream(InputStream inputStream) { } protected void incrementCounter(Statistic statistic) { - //don't do anyting in this default implementation. + incrementCounter(statistic, 1); + } + + protected void incrementCounter(Statistic statistic, long count) { + //don't do anything in this default implementation. 
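The new two-argument incrementCounter(Statistic, long) above lets batch operations, such as deleteObjects with a whole key list, record the batch size in one call instead of a single increment. A minimal sketch of the intended override pattern; the class names are placeholders, and OzoneFSStorageStatistics is assumed to be the statistics sink used by the stats-aware adapters:

```java
import org.apache.hadoop.fs.ozone.OzoneFSStorageStatistics;   // assumed type
import org.apache.hadoop.fs.ozone.Statistic;

class SketchAdapter {
  // Default: statistics are not wired up, so counting is a no-op.
  protected void incrementCounter(Statistic statistic, long count) {
    // no-op by default
  }
}

class SketchStatisticsAdapter extends SketchAdapter {
  private final OzoneFSStorageStatistics storageStatistics;

  SketchStatisticsAdapter(OzoneFSStorageStatistics storageStatistics) {
    this.storageStatistics = storageStatistics;
  }

  @Override
  protected void incrementCounter(Statistic statistic, long count) {
    if (storageStatistics != null) {
      // A batch delete of N keys is now recorded as N OBJECTS_DELETED
      // rather than a single increment.
      storageStatistics.incrementCounter(statistic, count);
    }
  }
}
```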
} @Override @@ -227,7 +237,7 @@ public FSDataOutputStream create(Path f, FsPermission permission, short replication, long blockSize, Progressable progress) throws IOException { LOG.trace("create() path:{}", f); - incrementCounter(Statistic.INVOCATION_CREATE); + incrementCounter(Statistic.INVOCATION_CREATE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(f); return createOutputStream(key, replication, overwrite, true); @@ -241,7 +251,7 @@ public FSDataOutputStream createNonRecursive(Path path, short replication, long blockSize, Progressable progress) throws IOException { - incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE); + incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(path); return createOutputStream(key, @@ -299,7 +309,7 @@ boolean processKey(List keyList) throws IOException { */ @Override public boolean rename(Path src, Path dst) throws IOException { - incrementCounter(Statistic.INVOCATION_RENAME); + incrementCounter(Statistic.INVOCATION_RENAME, 1); statistics.incrementWriteOps(1); super.checkPath(src); super.checkPath(dst); @@ -397,6 +407,34 @@ public boolean rename(Path src, Path dst) throws IOException { return result; } + /** + * Intercept rename to trash calls from TrashPolicyDefault, + * convert them to delete calls instead. + */ + @Deprecated + protected void rename(final Path src, final Path dst, + final Rename... options) throws IOException { + boolean hasMoveToTrash = false; + if (options != null) { + for (Rename option : options) { + if (option == Rename.TO_TRASH) { + hasMoveToTrash = true; + break; + } + } + } + if (!hasMoveToTrash) { + // if doesn't have TO_TRASH option, just pass the call to super + super.rename(src, dst, options); + } else { + // intercept when TO_TRASH is found + LOG.info("Move to trash is disabled for o3fs, deleting instead: {}. " + + "Files or directories will NOT be retained in trash. " + + "Ignore the following TrashPolicyDefault message, if any.", src); + delete(src, true); + } + } + private class DeleteIterator extends OzoneListingIterator { private boolean recursive; @@ -448,16 +486,9 @@ private boolean innerDelete(Path f, boolean recursive) throws IOException { } } - /** - * {@inheritDoc} - * - * OFS supports volume and bucket deletion, recursive or non-recursive. - * e.g. delete(new Path("/volume1"), true) - * But root deletion is explicitly disallowed for safety concerns. 
- */ @Override public boolean delete(Path f, boolean recursive) throws IOException { - incrementCounter(Statistic.INVOCATION_DELETE); + incrementCounter(Statistic.INVOCATION_DELETE, 1); statistics.incrementWriteOps(1); LOG.debug("Delete path {} - recursive {}", f, recursive); FileStatus status; @@ -477,9 +508,7 @@ public boolean delete(Path f, boolean recursive) throws IOException { result = innerDelete(f, recursive); } else { LOG.debug("delete: Path is a file: {}", f); - List keyList = new ArrayList<>(); - keyList.add(key); - result = adapter.deleteObjects(keyList); + result = adapter.deleteObject(key); } if (result) { @@ -539,7 +568,7 @@ private boolean o3Exists(final Path f) throws IOException { @Override public FileStatus[] listStatus(Path f) throws IOException { - incrementCounter(Statistic.INVOCATION_LIST_STATUS); + incrementCounter(Statistic.INVOCATION_LIST_STATUS, 1); statistics.incrementReadOps(1); LOG.trace("listStatus() path:{}", f); int numEntries = LISTING_PAGE_SIZE; @@ -666,6 +695,7 @@ private boolean mkdir(Path path) throws IOException { @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { + incrementCounter(Statistic.INVOCATION_MKDIRS); LOG.trace("mkdir() path:{} ", f); String key = pathToKey(f); if (isEmpty(key)) { @@ -676,7 +706,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { @Override public FileStatus getFileStatus(Path f) throws IOException { - incrementCounter(Statistic.INVOCATION_GET_FILE_STATUS); + incrementCounter(Statistic.INVOCATION_GET_FILE_STATUS, 1); statistics.incrementReadOps(1); LOG.trace("getFileStatus() path:{}", f); Path qualifiedPath = f.makeQualified(uri, workingDir); @@ -709,6 +739,73 @@ public short getDefaultReplication() { return adapter.getDefaultReplication(); } + @Override + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, + Path dst) throws IOException { + incrementCounter(Statistic.INVOCATION_COPY_FROM_LOCAL_FILE); + super.copyFromLocalFile(delSrc, overwrite, srcs, dst); + } + + @Override + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, + Path dst) throws IOException { + incrementCounter(Statistic.INVOCATION_COPY_FROM_LOCAL_FILE); + super.copyFromLocalFile(delSrc, overwrite, src, dst); + } + + @Override + public boolean exists(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_EXISTS); + return super.exists(f); + } + + @Override + public FileChecksum getFileChecksum(Path f, long length) throws IOException { + incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM); + return super.getFileChecksum(f, length); + } + + @Override + public FileStatus[] globStatus(Path pathPattern) throws IOException { + incrementCounter(Statistic.INVOCATION_GLOB_STATUS); + return super.globStatus(pathPattern); + } + + @Override + public FileStatus[] globStatus(Path pathPattern, PathFilter filter) + throws IOException { + incrementCounter(Statistic.INVOCATION_GLOB_STATUS); + return super.globStatus(pathPattern, filter); + } + + @Override + @SuppressWarnings("deprecation") + public boolean isDirectory(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_IS_DIRECTORY); + return super.isDirectory(f); + } + + @Override + @SuppressWarnings("deprecation") + public boolean isFile(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_IS_FILE); + return super.isFile(f); + } + + @Override + public RemoteIterator listFiles(Path f, boolean recursive) + throws IOException { + 
incrementCounter(Statistic.INVOCATION_LIST_FILES); + return super.listFiles(f, recursive); + } + + @Override + public RemoteIterator listLocatedStatus(Path f) + throws IOException { + incrementCounter(Statistic.INVOCATION_LIST_LOCATED_STATUS); + return super.listLocatedStatus(f); + } + /** * Turn a path (relative or otherwise) into an Ozone key. * @@ -721,9 +818,15 @@ public String pathToKey(Path path) { path = new Path(workingDir, path); } // removing leading '/' char - String key = path.toUri().getPath().substring(1); + String key = path.toUri().getPath(); + + if (OzoneFSUtils.isValidName(key)) { + key = path.toUri().getPath(); + } else { + throw new InvalidPathException("Invalid path Name" + key); + } LOG.trace("path for key:{} is:{}", key, path); - return key; + return key.substring(1); } /** diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index d083aa3dcfbd..a4883350b588 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -27,10 +27,10 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.stream.Collectors; import com.google.common.base.Preconditions; import org.apache.commons.collections.CollectionUtils; -import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -284,7 +285,7 @@ public void close() throws IOException { @Override public InputStream readFile(String pathStr) throws IOException { - incrementCounter(Statistic.OBJECTS_READ); + incrementCounter(Statistic.OBJECTS_READ, 1); OFSPath ofsPath = new OFSPath(pathStr); String key = ofsPath.getKeyName(); try { @@ -301,14 +302,14 @@ public InputStream readFile(String pathStr) throws IOException { } } - protected void incrementCounter(Statistic objectsRead) { - //noop: Use OzoneClientAdapterImpl which supports statistics. + protected void incrementCounter(Statistic objectsRead, long count) { + //noop: Use RootedOzoneClientAdapterImpl which supports statistics. 
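The rooted (ofs) adapter below works in terms of OFSPath, which splits a rooted path into volume, bucket and key parts. A hedged illustration of the accessors this patch relies on; the sample path is invented and OFSPath's exact parsing rules are not restated here:

```java
// Placed in the same package as a precaution, since OFSPath may not be public
// outside org.apache.hadoop.fs.ozone (assumption).
package org.apache.hadoop.fs.ozone;

public final class OfsPathSketch {
  public static void main(String[] args) {
    OFSPath ofsPath = new OFSPath("/vol1/bucket1/dir1/key1");

    String volume = ofsPath.getVolumeName();       // expected: "vol1"
    String key = ofsPath.getKeyName();             // key portion under the bucket
    String volAndBucket = ofsPath.getNonKeyPath(); // volume+bucket prefix,
                                                   // compared by areInSameBucket()

    // Root/volume/bucket checks guard key-only operations; e.g. createFile()
    // rejects these with an IOException in this patch.
    boolean keyLevel = !(ofsPath.isRoot() || ofsPath.isVolume()
        || ofsPath.isBucket());

    System.out.println(volume + " " + volAndBucket + " " + key + " " + keyLevel);
  }
}
```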
} @Override public OzoneFSOutputStream createFile(String pathStr, short replication, boolean overWrite, boolean recursive) throws IOException { - incrementCounter(Statistic.OBJECTS_CREATED); + incrementCounter(Statistic.OBJECTS_CREATED, 1); OFSPath ofsPath = new OFSPath(pathStr); if (ofsPath.isRoot() || ofsPath.isVolume() || ofsPath.isBucket()) { throw new IOException("Cannot create file under root or volume."); @@ -358,7 +359,7 @@ public void renameKey(String key, String newKeyName) throws IOException { */ @Override public void rename(String path, String newPath) throws IOException { - incrementCounter(Statistic.OBJECTS_RENAMED); + incrementCounter(Statistic.OBJECTS_RENAMED, 1); OFSPath ofsPath = new OFSPath(path); OFSPath ofsNewPath = new OFSPath(newPath); @@ -384,7 +385,7 @@ public void rename(String path, String newPath) throws IOException { */ void rename(OzoneBucket bucket, String path, String newPath) throws IOException { - incrementCounter(Statistic.OBJECTS_RENAMED); + incrementCounter(Statistic.OBJECTS_RENAMED, 1); OFSPath ofsPath = new OFSPath(path); OFSPath ofsNewPath = new OFSPath(newPath); // No same-bucket policy check here since this call path is controlled @@ -402,7 +403,7 @@ void rename(OzoneBucket bucket, String path, String newPath) @Override public boolean createDirectory(String pathStr) throws IOException { LOG.trace("creating dir for path: {}", pathStr); - incrementCounter(Statistic.OBJECTS_CREATED); + incrementCounter(Statistic.OBJECTS_CREATED, 1); OFSPath ofsPath = new OFSPath(pathStr); if (ofsPath.getVolumeName().isEmpty()) { // Volume name unspecified, invalid param, return failure @@ -441,7 +442,7 @@ public boolean createDirectory(String pathStr) throws IOException { @Override public boolean deleteObject(String path) { LOG.trace("issuing delete for path to key: {}", path); - incrementCounter(Statistic.OBJECTS_DELETED); + incrementCounter(Statistic.OBJECTS_DELETED, 1); OFSPath ofsPath = new OFSPath(path); String keyName = ofsPath.getKeyName(); if (keyName.length() == 0) { @@ -457,45 +458,81 @@ public boolean deleteObject(String path) { } } + /** + * Helper function to check if the list of key paths are in the same volume + * and same bucket. + */ + private boolean areInSameBucket(List keyNameList) { + if (keyNameList.isEmpty()) { + return true; + } + String firstKeyPath = keyNameList.get(0); + final String volAndBucket = new OFSPath(firstKeyPath).getNonKeyPath(); + // return true only if all key paths' volume and bucket in the list match + // the first element's + return keyNameList.stream().skip(1).allMatch(p -> + new OFSPath(p).getNonKeyPath().equals(volAndBucket)); + } + /** * Helper method to delete an object specified by key name in bucket. * - * @param pathList key name list to be deleted - * @return true if the key is deleted, false otherwise + * Only supports deleting keys in the same bucket in one call. + * + * Each item in the given list should be the String of an OFS path: + * e.g. ofs://om/vol1/buck1/k1 + * + * @param keyNameList key name list to be deleted + * @return true if the key deletion is successful, false otherwise */ @Override - public boolean deleteObjects(List pathList) { - // TODO: we will support deleteObjects in ofs. - LOG.error("ofs currently does not support deleteObjects"); - return false; + public boolean deleteObjects(List keyNameList) { + if (keyNameList.size() == 0) { + return true; + } + // Sanity check. 
Support only deleting a list of keys in the same bucket + if (!areInSameBucket(keyNameList)) { + LOG.error("Deleting keys from different buckets in a single batch " + + "is not supported."); + return false; + } + try { + OFSPath firstKeyPath = new OFSPath(keyNameList.get(0)); + OzoneBucket bucket = getBucket(firstKeyPath, false); + return deleteObjects(bucket, keyNameList); + } catch (IOException ioe) { + LOG.error("delete key failed: {}", ioe.getMessage()); + return false; + } } /** * Package-private helper function to reduce calls to getBucket(). + * + * This will be faster than the public variant of the method since this + * doesn't verify the same-bucket condition. + * * @param bucket Bucket to operate in. - * @param path Path to delete. - * @return true if operation succeeded, false upon IOException. + * @param keyNameList key name list to be deleted. + * @return true if operation succeeded, false on IOException. */ - boolean deleteObject(OzoneBucket bucket, String path) { - LOG.trace("issuing delete for path to key: {}", path); - incrementCounter(Statistic.OBJECTS_DELETED); - OFSPath ofsPath = new OFSPath(path); - String keyName = ofsPath.getKeyName(); - if (keyName.length() == 0) { - return false; - } + boolean deleteObjects(OzoneBucket bucket, List keyNameList) { + List keyList = keyNameList.stream() + .map(p -> new OFSPath(p).getKeyName()) + .collect(Collectors.toList()); try { - bucket.deleteKey(keyName); + incrementCounter(Statistic.OBJECTS_DELETED, keyNameList.size()); + bucket.deleteKeys(keyList); return true; } catch (IOException ioe) { - LOG.error("delete key failed " + ioe.getMessage()); + LOG.error("delete key failed: {}", ioe.getMessage()); return false; } } public FileStatusAdapter getFileStatus(String path, URI uri, Path qualifiedPath, String userName) throws IOException { - incrementCounter(Statistic.OBJECTS_QUERY); + incrementCounter(Statistic.OBJECTS_QUERY, 1); OFSPath ofsPath = new OFSPath(path); String key = ofsPath.getKeyName(); if (ofsPath.isRoot()) { @@ -579,7 +616,7 @@ public Collection getTrashRoots(boolean allUsers, @Override public Iterator listKeys(String pathStr) { - incrementCounter(Statistic.OBJECTS_LIST); + incrementCounter(Statistic.OBJECTS_LIST, 1); OFSPath ofsPath = new OFSPath(pathStr); String key = ofsPath.getKeyName(); OzoneBucket bucket; @@ -668,7 +705,7 @@ public List listStatus(String pathStr, boolean recursive, String startPath, long numEntries, URI uri, Path workingDir, String username) throws IOException { - incrementCounter(Statistic.OBJECTS_LIST); + incrementCounter(Statistic.OBJECTS_LIST, 1); // Remove authority from startPath if it exists if (startPath.startsWith(uri.toString())) { try { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index 7c284281c80b..015621c2b52a 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -18,20 +18,24 @@ package org.apache.hadoop.fs.ozone; import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CreateFlag; import 
org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; @@ -49,6 +53,7 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.util.ArrayList; import java.util.Collection; import java.util.EnumSet; import java.util.Iterator; @@ -60,6 +65,8 @@ import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER; import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_EMPTY; @@ -182,7 +189,7 @@ public String getScheme() { @Override public FSDataInputStream open(Path path, int bufferSize) throws IOException { - incrementCounter(Statistic.INVOCATION_OPEN); + incrementCounter(Statistic.INVOCATION_OPEN, 1); statistics.incrementReadOps(1); LOG.trace("open() path: {}", path); final String key = pathToKey(path); @@ -191,6 +198,10 @@ public FSDataInputStream open(Path path, int bufferSize) throws IOException { } protected void incrementCounter(Statistic statistic) { + incrementCounter(statistic, 1); + } + + protected void incrementCounter(Statistic statistic, long count) { //don't do anything in this default implementation. 
} @@ -200,7 +211,7 @@ public FSDataOutputStream create(Path f, FsPermission permission, short replication, long blockSize, Progressable progress) throws IOException { LOG.trace("create() path:{}", f); - incrementCounter(Statistic.INVOCATION_CREATE); + incrementCounter(Statistic.INVOCATION_CREATE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(f); return createOutputStream(key, replication, overwrite, true); @@ -214,7 +225,7 @@ public FSDataOutputStream createNonRecursive(Path path, short replication, long blockSize, Progressable progress) throws IOException { - incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE); + incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(path); return createOutputStream(key, @@ -254,9 +265,11 @@ private class RenameIterator extends OzoneListingIterator { } @Override - boolean processKeyPath(String keyPath) throws IOException { - String newPath = dstPath.concat(keyPath.substring(srcPath.length())); - adapterImpl.rename(this.bucket, keyPath, newPath); + boolean processKeyPath(List keyPathList) throws IOException { + for (String keyPath : keyPathList) { + String newPath = dstPath.concat(keyPath.substring(srcPath.length())); + adapterImpl.rename(this.bucket, keyPath, newPath); + } return true; } } @@ -276,7 +289,7 @@ boolean processKeyPath(String keyPath) throws IOException { */ @Override public boolean rename(Path src, Path dst) throws IOException { - incrementCounter(Statistic.INVOCATION_RENAME); + incrementCounter(Statistic.INVOCATION_RENAME, 1); statistics.incrementWriteOps(1); if (src.equals(dst)) { return true; @@ -372,6 +385,34 @@ public boolean rename(Path src, Path dst) throws IOException { return result; } + /** + * Intercept rename to trash calls from TrashPolicyDefault, + * convert them to delete calls instead. + */ + @Deprecated + protected void rename(final Path src, final Path dst, + final Options.Rename... options) throws IOException { + boolean hasMoveToTrash = false; + if (options != null) { + for (Options.Rename option : options) { + if (option == Options.Rename.TO_TRASH) { + hasMoveToTrash = true; + break; + } + } + } + if (!hasMoveToTrash) { + // if doesn't have TO_TRASH option, just pass the call to super + super.rename(src, dst, options); + } else { + // intercept when TO_TRASH is found + LOG.info("Move to trash is disabled for ofs, deleting instead: {}. " + + "Files or directories will NOT be retained in trash. " + + "Ignore the following TrashPolicyDefault message, if any.", src); + delete(src, true); + } + } + private class DeleteIterator extends OzoneListingIterator { final private boolean recursive; private final OzoneBucket bucket; @@ -394,17 +435,12 @@ && listStatus(f).length != 0) { } @Override - boolean processKeyPath(String keyPath) { - if (keyPath.equals("")) { - LOG.trace("Skipping deleting root directory"); - return true; - } else { - LOG.trace("Deleting: {}", keyPath); - boolean succeed = adapterImpl.deleteObject(this.bucket, keyPath); - // if recursive delete is requested ignore the return value of - // deleteObject and issue deletes for other keys. - return recursive || succeed; - } + boolean processKeyPath(List keyPathList) { + LOG.trace("Deleting keys: {}", keyPathList); + boolean succeed = adapterImpl.deleteObjects(this.bucket, keyPathList); + // if recursive delete is requested ignore the return value of + // deleteObject and issue deletes for other keys. 
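Both iterators now receive keys in batches; the batching itself happens in OzoneListingIterator.iterate() further below, which groups keys by OZONE_FS_ITERATE_BATCH_SIZE before invoking processKeyPath. A stripped-down sketch of that accumulation loop with placeholder names, mirroring the logic added in this patch:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

abstract class BatchingIteratorSketch {
  // Placeholder for DeleteIterator/RenameIterator-style batch handlers.
  abstract boolean processKeyPath(List<String> keyPathList) throws IOException;

  // Gather up to batchSize keys, flush them, repeat, then flush the remainder.
  // batchSize corresponds to OZONE_FS_ITERATE_BATCH_SIZE in the real code.
  boolean iterateInBatches(Iterable<String> keyPaths, int batchSize)
      throws IOException {
    List<String> batch = new ArrayList<>();
    for (String keyPath : keyPaths) {
      batch.add(keyPath);
      if (batch.size() >= batchSize) {
        if (!processKeyPath(batch)) {
          return false;              // stop early on failure
        }
        batch.clear();
      }
    }
    return batch.isEmpty() || processKeyPath(batch);
  }
}
```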
+ return recursive || succeed; } } @@ -429,9 +465,16 @@ private boolean innerDelete(Path f, boolean recursive) throws IOException { } } + /** + * {@inheritDoc} + * + * OFS supports volume and bucket deletion, recursive or non-recursive. + * e.g. delete(new Path("/volume1"), true) + * But root deletion is explicitly disallowed for safety concerns. + */ @Override public boolean delete(Path f, boolean recursive) throws IOException { - incrementCounter(Statistic.INVOCATION_DELETE); + incrementCounter(Statistic.INVOCATION_DELETE, 1); statistics.incrementWriteOps(1); LOG.debug("Delete path {} - recursive {}", f, recursive); FileStatus status; @@ -571,7 +614,7 @@ private boolean o3Exists(final Path f) throws IOException { @Override public FileStatus[] listStatus(Path f) throws IOException { - incrementCounter(Statistic.INVOCATION_LIST_STATUS); + incrementCounter(Statistic.INVOCATION_LIST_STATUS, 1); statistics.incrementReadOps(1); LOG.trace("listStatus() path:{}", f); int numEntries = LISTING_PAGE_SIZE; @@ -675,6 +718,7 @@ private boolean mkdir(Path path) throws IOException { @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { + incrementCounter(Statistic.INVOCATION_MKDIRS); LOG.trace("mkdir() path:{} ", f); String key = pathToKey(f); if (isEmpty(key)) { @@ -685,7 +729,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { @Override public FileStatus getFileStatus(Path f) throws IOException { - incrementCounter(Statistic.INVOCATION_GET_FILE_STATUS); + incrementCounter(Statistic.INVOCATION_GET_FILE_STATUS, 1); statistics.incrementReadOps(1); LOG.trace("getFileStatus() path:{}", f); Path qualifiedPath = f.makeQualified(uri, workingDir); @@ -724,6 +768,73 @@ public short getDefaultReplication() { return adapter.getDefaultReplication(); } + @Override + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, + Path dst) throws IOException { + incrementCounter(Statistic.INVOCATION_COPY_FROM_LOCAL_FILE); + super.copyFromLocalFile(delSrc, overwrite, srcs, dst); + } + + @Override + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, + Path dst) throws IOException { + incrementCounter(Statistic.INVOCATION_COPY_FROM_LOCAL_FILE); + super.copyFromLocalFile(delSrc, overwrite, src, dst); + } + + @Override + public boolean exists(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_EXISTS); + return super.exists(f); + } + + @Override + public FileChecksum getFileChecksum(Path f, long length) throws IOException { + incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM); + return super.getFileChecksum(f, length); + } + + @Override + public FileStatus[] globStatus(Path pathPattern) throws IOException { + incrementCounter(Statistic.INVOCATION_GLOB_STATUS); + return super.globStatus(pathPattern); + } + + @Override + public FileStatus[] globStatus(Path pathPattern, PathFilter filter) + throws IOException { + incrementCounter(Statistic.INVOCATION_GLOB_STATUS); + return super.globStatus(pathPattern, filter); + } + + @Override + @SuppressWarnings("deprecation") + public boolean isDirectory(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_IS_DIRECTORY); + return super.isDirectory(f); + } + + @Override + @SuppressWarnings("deprecation") + public boolean isFile(Path f) throws IOException { + incrementCounter(Statistic.INVOCATION_IS_FILE); + return super.isFile(f); + } + + @Override + public RemoteIterator listFiles(Path f, boolean recursive) + throws IOException { + 
incrementCounter(Statistic.INVOCATION_LIST_FILES); + return super.listFiles(f, recursive); + } + + @Override + public RemoteIterator listLocatedStatus(Path f) + throws IOException { + incrementCounter(Statistic.INVOCATION_LIST_LOCATED_STATUS); + return super.listLocatedStatus(f); + } + /** * Turn a path (relative or otherwise) into an Ozone key. * @@ -795,7 +906,8 @@ private abstract class OzoneListingIterator { * @return true if we should continue iteration of keys, false otherwise. * @throws IOException */ - abstract boolean processKeyPath(String keyPath) throws IOException; + abstract boolean processKeyPath(List keyPathList) + throws IOException; /** * Iterates through all the keys prefixed with the input path's key and @@ -809,6 +921,9 @@ private abstract class OzoneListingIterator { */ boolean iterate() throws IOException { LOG.trace("Iterating path: {}", path); + List keyPathList = new ArrayList<>(); + int batchSize = getConf().getInt(OZONE_FS_ITERATE_BATCH_SIZE, + OZONE_FS_ITERATE_BATCH_SIZE_DEFAULT); if (status.isDirectory()) { LOG.trace("Iterating directory: {}", pathKey); OFSPath ofsPath = new OFSPath(pathKey); @@ -821,14 +936,27 @@ boolean iterate() throws IOException { // outside AdapterImpl. - Maybe a refactor later. String keyPath = ofsPathPrefix + key.getName(); LOG.trace("iterating key path: {}", keyPath); - if (!processKeyPath(keyPath)) { + if (!key.getName().equals("")) { + keyPathList.add(keyPath); + } + if (keyPathList.size() >= batchSize) { + if (!processKeyPath(keyPathList)) { + return false; + } else { + keyPathList.clear(); + } + } + } + if (keyPathList.size() > 0) { + if (!processKeyPath(keyPathList)) { return false; } } return true; } else { LOG.trace("iterating file: {}", path); - return processKeyPath(pathKey); + keyPathList.add(pathKey); + return processKeyPath(keyPathList); } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java index d19f570af452..4096099c122c 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java @@ -53,9 +53,9 @@ public OzoneClientAdapterImpl(String omHost, int omPort, } @Override - protected void incrementCounter(Statistic objectsRead) { + protected void incrementCounter(Statistic objectsRead, long count) { if (storageStatistics != null) { - storageStatistics.incrementCounter(objectsRead, 1); + storageStatistics.incrementCounter(objectsRead, count); } } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneClientAdapterImpl.java index e5cfb14acc0f..c5d3f05ec769 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneClientAdapterImpl.java @@ -53,9 +53,9 @@ public RootedOzoneClientAdapterImpl(String omHost, int omPort, } @Override - protected void incrementCounter(Statistic objectsRead) { + protected void incrementCounter(Statistic objectsRead, long count) { if (storageStatistics != null) { - storageStatistics.incrementCounter(objectsRead, 1); + storageStatistics.incrementCounter(objectsRead, count); } } } diff --git 
a/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/ozonefs-common/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index 95774998b542..60bda723681f 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem-hadoop2 - Apache Hadoop Ozone FileSystem Hadoop 2.x compatibility + Apache Hadoop Ozone FS Hadoop 2.x compatibility jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java new file mode 100644 index 000000000000..4cd04da9c867 --- /dev/null +++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.ozone.OzoneConsts; + +/** + * Ozone implementation of AbstractFileSystem. 
+ * This impl delegates to the RootedOzoneFileSystem + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RootedOzFs extends DelegateToFileSystem { + + public RootedOzFs(URI theUri, Configuration conf) + throws IOException, URISyntaxException { + super(theUri, new RootedOzoneFileSystem(), conf, + OzoneConsts.OZONE_OFS_URI_SCHEME, false); + } + + @Override + public int getUriDefaultPort() { + return -1; + } +} diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index cbcc29eaa4f8..bad9c41f5737 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem-hadoop3 - Apache Hadoop Ozone FileSystem Hadoop 3.x compatibility + Apache Hadoop Ozone FS Hadoop 3.x compatibility jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index c3e308b0d3b5..1abf148ce87e 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -81,9 +81,9 @@ StorageStatistics getOzoneFSOpsCountStatistics() { } @Override - protected void incrementCounter(Statistic statistic) { + protected void incrementCounter(Statistic statistic, long count) { if (storageStatistics != null) { - storageStatistics.incrementCounter(statistic, 1); + storageStatistics.incrementCounter(statistic, count); } } diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java new file mode 100644 index 000000000000..076287eaac14 --- /dev/null +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.ozone.OzoneConsts; + +/** + * Ozone implementation of AbstractFileSystem. + * This impl delegates to the RootedOzoneFileSystem + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RootedOzFs extends DelegateToFileSystem { + + public RootedOzFs(URI theUri, Configuration conf) + throws IOException, URISyntaxException { + super(theUri, new RootedOzoneFileSystem(), conf, + OzoneConsts.OZONE_OFS_URI_SCHEME, false); + } +} diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index 4817f2aa2d7f..6d48ead5ad5f 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -80,9 +80,9 @@ StorageStatistics getOzoneFSOpsCountStatistics() { } @Override - protected void incrementCounter(Statistic statistic) { + protected void incrementCounter(Statistic statistic, long count) { if (storageStatistics != null) { - storageStatistics.incrementCounter(statistic, 1); + storageStatistics.incrementCounter(statistic, count); } } diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. 
org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index da8ffc8ea824..2747229d0a23 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem-shaded Apache Hadoop Ozone FileSystem Shaded jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index b758b42e80ee..849b3972f7cf 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-filesystem Apache Hadoop Ozone FileSystem jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index c3e308b0d3b5..1abf148ce87e 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -81,9 +81,9 @@ StorageStatistics getOzoneFSOpsCountStatistics() { } @Override - protected void incrementCounter(Statistic statistic) { + protected void incrementCounter(Statistic statistic, long count) { if (storageStatistics != null) { - storageStatistics.incrementCounter(statistic, 1); + storageStatistics.incrementCounter(statistic, count); } } diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java new file mode 100644 index 000000000000..076287eaac14 --- /dev/null +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.ozone.OzoneConsts; + +/** + * Ozone implementation of AbstractFileSystem. 
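With RootedOzoneFileSystem registered in META-INF/services and RootedOzFs provided as the AbstractFileSystem delegate, clients can reach ofs:// through both the FileSystem and FileContext APIs. A hedged usage sketch; "om-service" is an invented service id, and the fs.AbstractFileSystem.<scheme>.impl key is standard Hadoop lookup configuration rather than something introduced by this patch:

```java
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;

public final class OfsClientSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // FileSystem API: the ofs scheme resolves through the META-INF/services
    // entry added for RootedOzoneFileSystem.
    FileSystem fs = FileSystem.get(URI.create("ofs://om-service/"), conf);

    // FileContext API: AbstractFileSystem implementations are looked up via
    // fs.AbstractFileSystem.<scheme>.impl, so point it at the new RootedOzFs.
    conf.set("fs.AbstractFileSystem.ofs.impl",
        "org.apache.hadoop.fs.ozone.RootedOzFs");
    FileContext fc =
        FileContext.getFileContext(URI.create("ofs://om-service/"), conf);

    System.out.println(fs.getUri() + " / " + fc.getWorkingDirectory());
  }
}
```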
+ * This impl delegates to the RootedOzoneFileSystem + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RootedOzFs extends DelegateToFileSystem { + + public RootedOzFs(URI theUri, Configuration conf) + throws IOException, URISyntaxException { + super(theUri, new RootedOzoneFileSystem(), conf, + OzoneConsts.OZONE_OFS_URI_SCHEME, false); + } +} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index 4817f2aa2d7f..6d48ead5ad5f 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -80,9 +80,9 @@ StorageStatistics getOzoneFSOpsCountStatistics() { } @Override - protected void incrementCounter(Statistic statistic) { + protected void incrementCounter(Statistic statistic, long count) { if (storageStatistics != null) { - storageStatistics.incrementCounter(statistic, 1); + storageStatistics.incrementCounter(statistic, count); } } diff --git a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 9455fd28f954..6f4b2b2e82ea 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -16,10 +16,10 @@ org.apache.hadoop hadoop-main-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Project Apache Hadoop Ozone pom @@ -41,7 +41,6 @@ datanode s3gateway dist - upgrade csi fault-injection-test insight @@ -211,11 +210,6 @@ hadoop-ozone-recon ${ozone.version} - - org.apache.hadoop - hadoop-ozone-upgrade - ${ozone.version} - org.apache.hadoop hadoop-hdds-container-service @@ -265,6 +259,14 @@ **/hs_err*.log **/target/** .gitattributes + **/.attach_* + **/**.rej + **/.factorypath + public + **/*.iml + **/output.xml + **/log.html + **/report.html .idea/** **/.ssh/id_rsa* dev-support/*tests @@ -291,7 +293,7 @@ webapps/static/angular-route-1.7.9.min.js webapps/static/bootstrap-3.4.1/** webapps/static/d3-3.5.17.min.js - webapps/static/jquery-3.4.1.min.js + webapps/static/jquery-3.5.1.min.js webapps/static/jquery.dataTables.min.js webapps/static/nvd3-1.8.5.min.css.map webapps/static/nvd3-1.8.5.min.css @@ -303,7 +305,7 @@ **/pnpm-lock.yaml **/ozone-recon-web/build/** src/main/license/** - src/main/proto/proto.lock + src/main/resources/proto.lock diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml index 9b8780bbff37..917d7a72f9bf 100644 --- a/hadoop-ozone/recon-codegen/pom.xml +++ b/hadoop-ozone/recon-codegen/pom.xml @@ -18,7 +18,7 @@ hadoop-ozone org.apache.hadoop - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT 4.0.0 hadoop-ozone-reconcodegen diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 
5696ab3e01bc..1be715dc7d22 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -81,8 +81,8 @@ public void initializeSchema() throws SQLException { */ private void createContainerHistoryTable() { dslContext.createTableIfNotExists(CONTAINER_HISTORY_TABLE_NAME) - .column(CONTAINER_ID, SQLDataType.BIGINT) - .column("datanode_host", SQLDataType.VARCHAR(1024)) + .column(CONTAINER_ID, SQLDataType.BIGINT.nullable(false)) + .column("datanode_host", SQLDataType.VARCHAR(766).nullable(false)) .column("first_report_timestamp", SQLDataType.BIGINT) .column("last_report_timestamp", SQLDataType.BIGINT) .constraint(DSL.constraint("pk_container_id_datanode_host") diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java index 45fc1ba0d73b..72e27024feea 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java @@ -61,7 +61,7 @@ public void initializeSchema() throws SQLException { */ private void createReconTaskStatusTable(Connection conn) { DSL.using(conn).createTableIfNotExists(RECON_TASK_STATUS_TABLE_NAME) - .column("task_name", SQLDataType.VARCHAR(1024)) + .column("task_name", SQLDataType.VARCHAR(768).nullable(false)) .column("last_updated_timestamp", SQLDataType.BIGINT) .column("last_updated_seq_number", SQLDataType.BIGINT) .constraint(DSL.constraint("pk_task_name") diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java index adfaca626d33..394c9de8df59 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,6 +22,7 @@ import com.google.inject.Inject; import com.google.inject.Singleton; +import org.jooq.DSLContext; import org.jooq.impl.DSL; import org.jooq.impl.SQLDataType; @@ -36,6 +37,7 @@ public class StatsSchemaDefinition implements ReconSchemaDefinition { public static final String GLOBAL_STATS_TABLE_NAME = "GLOBAL_STATS"; + private DSLContext dslContext; private final DataSource dataSource; @Inject @@ -46,18 +48,18 @@ public class StatsSchemaDefinition implements ReconSchemaDefinition { @Override public void initializeSchema() throws SQLException { Connection conn = dataSource.getConnection(); + dslContext = DSL.using(conn); if (!TABLE_EXISTS_CHECK.test(conn, GLOBAL_STATS_TABLE_NAME)) { - createGlobalStatsTable(conn); + createGlobalStatsTable(); } } /** * Create the Ozone Global Stats table. 
- * @param conn connection */ - private void createGlobalStatsTable(Connection conn) { - DSL.using(conn).createTableIfNotExists(GLOBAL_STATS_TABLE_NAME) - .column("key", SQLDataType.VARCHAR(255)) + private void createGlobalStatsTable() { + dslContext.createTableIfNotExists(GLOBAL_STATS_TABLE_NAME) + .column("key", SQLDataType.VARCHAR(255).nullable(false)) .column("value", SQLDataType.BIGINT) .column("last_updated_timestamp", SQLDataType.TIMESTAMP) .constraint(DSL.constraint("pk_key") diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java index 92de19e080dd..193ee758fecc 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java @@ -63,17 +63,17 @@ public void initializeSchema() throws SQLException { Connection conn = dataSource.getConnection(); dslContext = DSL.using(conn); if (!TABLE_EXISTS_CHECK.test(conn, FILE_COUNT_BY_SIZE_TABLE_NAME)) { - createFileSizeCountTable(conn); + createFileSizeCountTable(); } if (!TABLE_EXISTS_CHECK.test(conn, CLUSTER_GROWTH_DAILY_TABLE_NAME)) { - createClusterGrowthTable(conn); + createClusterGrowthTable(); } } - private void createClusterGrowthTable(Connection conn) { + private void createClusterGrowthTable() { dslContext.createTableIfNotExists(CLUSTER_GROWTH_DAILY_TABLE_NAME) - .column("timestamp", SQLDataType.TIMESTAMP) - .column("datanode_id", SQLDataType.INTEGER) + .column("timestamp", SQLDataType.TIMESTAMP.nullable(false)) + .column("datanode_id", SQLDataType.INTEGER.nullable(false)) .column("datanode_host", SQLDataType.VARCHAR(1024)) .column("rack_id", SQLDataType.VARCHAR(1024)) .column("available_size", SQLDataType.BIGINT) @@ -85,17 +85,22 @@ private void createClusterGrowthTable(Connection conn) { .execute(); } - private void createFileSizeCountTable(Connection conn) { + private void createFileSizeCountTable() { dslContext.createTableIfNotExists(FILE_COUNT_BY_SIZE_TABLE_NAME) - .column("volume", SQLDataType.VARCHAR(64)) - .column("bucket", SQLDataType.VARCHAR(64)) - .column("file_size", SQLDataType.BIGINT) + .column("volume", SQLDataType.VARCHAR(64).nullable(false)) + .column("bucket", SQLDataType.VARCHAR(64).nullable(false)) + .column("file_size", SQLDataType.BIGINT.nullable(false)) .column("count", SQLDataType.BIGINT) .constraint(DSL.constraint("pk_volume_bucket_file_size") .primaryKey("volume", "bucket", "file_size")) .execute(); } + /** + * Returns the DSL context. 
+ * + * @return dslContext + */ public DSLContext getDSLContext() { return dslContext; } diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index 6d94cc5b2919..40d0d911ecbf 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -18,7 +18,7 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Recon 4.0.0 diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index 972399fb1040..0cbc61fa2459 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -32,13 +32,11 @@ private ReconConstants() { public static final String RECON_CONTAINER_KEY_DB = "recon-container-key.db"; - public static final String CONTAINER_COUNT_KEY = "totalCount"; + public static final String CONTAINER_COUNT_KEY = "containerCount"; - public static final String RECON_OM_SNAPSHOT_DB = - "om.snapshot.db"; + public static final String RECON_OM_SNAPSHOT_DB = "om.snapshot.db"; - public static final String CONTAINER_KEY_TABLE = - "containerKeyTable"; + public static final String CONTAINER_KEY_TABLE = "containerKeyTable"; public static final String CONTAINER_KEY_COUNT_TABLE = "containerKeyCountTable"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java index ac14757af158..89917675a174 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java @@ -45,6 +45,7 @@ import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask; +import org.apache.hadoop.ozone.recon.tasks.TableCountTask; import org.apache.hadoop.ozone.recon.tasks.ReconOmTask; import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; import org.apache.hadoop.ozone.recon.tasks.ReconTaskControllerImpl; @@ -56,7 +57,8 @@ import com.google.inject.Singleton; import com.google.inject.multibindings.Multibinder; import static org.apache.hadoop.hdds.scm.cli.ContainerOperationClient.newContainerRpcClient; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID; +import static org.apache.hadoop.ozone.OmUtils.getOzoneManagerServiceId; + import org.apache.ratis.protocol.ClientId; import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig; import org.hadoop.ozone.recon.schema.tables.daos.ClusterGrowthDailyDao; @@ -115,6 +117,7 @@ protected void configure() { Multibinder.newSetBinder(binder(), ReconOmTask.class); taskBinder.addBinding().to(ContainerKeyMapperTask.class); taskBinder.addBinding().to(FileSizeCountTask.class); + taskBinder.addBinding().to(TableCountTask.class); } } @@ -152,11 +155,10 @@ OzoneManagerProtocol getOzoneManagerProtocol( try { ClientId clientId = ClientId.randomId(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + String serviceId = getOzoneManagerServiceId(ozoneConfiguration); OmTransport transport = - OmTransportFactory.create(ozoneConfiguration, ugi, - ozoneConfiguration.get(OZONE_OM_INTERNAL_SERVICE_ID)); - ozoneManagerClient = new - 
OzoneManagerProtocolClientSideTranslatorPB( + OmTransportFactory.create(ozoneConfiguration, ugi, serviceId); + ozoneManagerClient = new OzoneManagerProtocolClientSideTranslatorPB( transport, clientId.toString()); } catch (IOException ioEx) { LOG.error("Error in provisioning OzoneManagerProtocol ", ioEx); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index 360589688a60..704c18e18357 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.ozone.recon; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; - import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -80,11 +78,6 @@ public final class ReconServerConfigKeys { public static final String RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT = "1m"; - public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL = - "ozone.recon.container.db.impl"; - public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL_DEFAULT = - OZONE_METADATA_STORE_IMPL_ROCKSDB; - public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL = "recon.om.snapshot.task.interval.delay"; public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index cba74280b297..b6e4d7c0f179 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -29,6 +29,7 @@ import java.net.URLConnection; import java.nio.file.Path; import java.nio.file.Paths; +import java.sql.Timestamp; import java.util.zip.GZIPOutputStream; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -44,8 +45,14 @@ import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig; import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; +import static org.jooq.impl.DSL.currentTimestamp; +import static org.jooq.impl.DSL.select; +import static org.jooq.impl.DSL.using; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; +import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; +import org.jooq.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -266,4 +273,29 @@ public File getLastKnownDB(File reconDbDir, String fileNamePrefix) { new File(reconDbDir.getPath(), lastKnownSnapshotFileName); } + /** + * Upsert row in GlobalStats table. 
+ * + * @param sqlConfiguration + * @param globalStatsDao + * @param key + * @param count + */ + public static void upsertGlobalStatsTable(Configuration sqlConfiguration, + GlobalStatsDao globalStatsDao, + String key, + Long count) { + // Get the current timestamp + Timestamp now = + using(sqlConfiguration).fetchValue(select(currentTimestamp())); + GlobalStats record = globalStatsDao.fetchOneByKey(key); + GlobalStats newRecord = new GlobalStats(key, count, now); + + // Insert a new record for key if it does not exist + if (record == null) { + globalStatsDao.insert(newRecord); + } else { + globalStatsDao.update(newRecord); + } + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java index 918ee1885d92..de0028c8089b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java @@ -24,10 +24,12 @@ import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.recon.api.types.ClusterStateResponse; import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; +import org.apache.hadoop.ozone.recon.tasks.TableCountTask; +import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; +import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,6 +41,10 @@ import javax.ws.rs.core.Response; import java.util.List; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; + /** * Endpoint to fetch current state of ozone cluster. 
*/ @@ -52,17 +58,17 @@ public class ClusterStateEndpoint { private ReconNodeManager nodeManager; private ReconPipelineManager pipelineManager; private ReconContainerManager containerManager; - private ReconOMMetadataManager omMetadataManager; + private GlobalStatsDao globalStatsDao; @Inject ClusterStateEndpoint(OzoneStorageContainerManager reconSCM, - ReconOMMetadataManager omMetadataManager) { + GlobalStatsDao globalStatsDao) { this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager(); this.pipelineManager = (ReconPipelineManager) reconSCM.getPipelineManager(); this.containerManager = (ReconContainerManager) reconSCM.getContainerManager(); - this.omMetadataManager = omMetadataManager; + this.globalStatsDao = globalStatsDao; } /** @@ -80,25 +86,20 @@ public Response getClusterState() { new DatanodeStorageReport(stats.getCapacity().get(), stats.getScmUsed().get(), stats.getRemaining().get()); ClusterStateResponse.Builder builder = ClusterStateResponse.newBuilder(); - if (omMetadataManager.isOmTablesInitialized()) { - try { - builder.setVolumes( - omMetadataManager.getVolumeTable().getEstimatedKeyCount()); - } catch (Exception ex) { - LOG.error("Unable to get Volumes count in ClusterStateResponse.", ex); - } - try { - builder.setBuckets( - omMetadataManager.getBucketTable().getEstimatedKeyCount()); - } catch (Exception ex) { - LOG.error("Unable to get Buckets count in ClusterStateResponse.", ex); - } - try { - builder.setKeys( - omMetadataManager.getKeyTable().getEstimatedKeyCount()); - } catch (Exception ex) { - LOG.error("Unable to get Keys count in ClusterStateResponse.", ex); - } + GlobalStats volumeRecord = globalStatsDao.findById( + TableCountTask.getRowKeyFromTable(VOLUME_TABLE)); + GlobalStats bucketRecord = globalStatsDao.findById( + TableCountTask.getRowKeyFromTable(BUCKET_TABLE)); + GlobalStats keyRecord = globalStatsDao.findById( + TableCountTask.getRowKeyFromTable(KEY_TABLE)); + if (volumeRecord != null) { + builder.setVolumes(volumeRecord.getValue()); + } + if (bucketRecord != null) { + builder.setBuckets(bucketRecord.getValue()); + } + if (keyRecord != null) { + builder.setKeys(keyRecord.getValue()); } ClusterStateResponse response = builder .setStorageReport(storageReport) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index c534062e5ed2..1778b846d49f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -157,7 +157,7 @@ public Response getKeysForContainer( // Directly calling get() on the Key table instead of iterating since // only full keys are supported now. When we change to using a prefix // of the key, this needs to change to prefix seek. - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get( + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().getSkipCache( containerKeyPrefix.getKeyPrefix()); if (null != omKeyInfo) { // Filter keys by version. 
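Taken together, the ReconUtils and ClusterStateEndpoint changes above move Recon from scanning OM tables at request time to reading precomputed counts from the GlobalStats SQL table. A minimal sketch of that write/read pattern, assuming an injected jOOQ Configuration and a generated GlobalStatsDao as in this patch (the 42L count and the local variable names are illustrative only):

    // Write side (done by a background task such as TableCountTask):
    // the row key is the OM table name with a "Count" suffix.
    String rowKey = TableCountTask.getRowKeyFromTable(KEY_TABLE);
    ReconUtils.upsertGlobalStatsTable(sqlConfiguration, globalStatsDao, rowKey, 42L);

    // Read side (done by an endpoint such as ClusterStateEndpoint):
    GlobalStats record = globalStatsDao.findById(rowKey);
    long keyCount = (record == null) ? 0L : record.getValue();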
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 2c017491f59d..bd022c4f1da2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -121,6 +121,10 @@ public Response getDatanodes() { .withPipelines(pipelines) .withLeaderCount(leaderCount.get()) .withUUid(datanode.getUuidString()) + .withVersion(datanode.getVersion()) + .withSetupTime(datanode.getSetupTime()) + .withRevision(datanode.getRevision()) + .withBuildDate(datanode.getBuildDate()) .build()); }); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 02d9ae811829..f75ea3233f37 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -55,6 +55,18 @@ public final class DatanodeMetadata { @XmlElement(name = "leaderCount") private int leaderCount; + @XmlElement(name = "version") + private String version; + + @XmlElement(name = "setupTime") + private long setupTime; + + @XmlElement(name = "revision") + private String revision; + + @XmlElement(name = "buildDate") + private String buildDate; + private DatanodeMetadata(Builder builder) { this.hostname = builder.hostname; this.uuid = builder.uuid; @@ -64,6 +76,10 @@ private DatanodeMetadata(Builder builder) { this.pipelines = builder.pipelines; this.containers = builder.containers; this.leaderCount = builder.leaderCount; + this.version = builder.version; + this.setupTime = builder.setupTime; + this.revision = builder.revision; + this.buildDate = builder.buildDate; } public String getHostname() { @@ -98,6 +114,22 @@ public String getUuid() { return uuid; } + public String getVersion() { + return version; + } + + public long getSetupTime() { + return setupTime; + } + + public String getRevision() { + return revision; + } + + public String getBuildDate() { + return buildDate; + } + /** * Returns new builder class that builds a DatanodeMetadata. * @@ -120,6 +152,10 @@ public static final class Builder { private List pipelines; private int containers; private int leaderCount; + private String version; + private long setupTime; + private String revision; + private String buildDate; public Builder() { this.containers = 0; @@ -167,6 +203,26 @@ public Builder withUUid(String uuid) { return this; } + public Builder withVersion(String version) { + this.version = version; + return this; + } + + public Builder withSetupTime(long setupTime) { + this.setupTime = setupTime; + return this; + } + + public Builder withRevision(String revision) { + this.revision = revision; + return this; + } + + public Builder withBuildDate(String buildDate) { + this.buildDate = buildDate; + return this; + } + /** * Constructs DatanodeMetadata. 
* diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java new file mode 100644 index 000000000000..c11ebbf63a63 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.recon.codec; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto.PARSER; + +import java.io.IOException; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.utils.db.Codec; + +/** + * Codec for DatanodeDetails. + */ +public class DatanodeDetailsCodec implements Codec { + + @Override + public byte[] toPersistedFormat(DatanodeDetails object) throws IOException { + return object.getProtoBufMessage().toByteArray(); + } + + @Override + public DatanodeDetails fromPersistedFormat(byte[] rawData) + throws IOException { + return DatanodeDetails.getFromProtoBuf(PARSER.parseFrom(rawData)); + } + + @Override + public DatanodeDetails copyObject(DatanodeDetails object) { + return object; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/ReconNodeDBKeyCodec.java similarity index 59% rename from hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDBDefinition.java rename to hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/ReconNodeDBKeyCodec.java index bcfe060e7baf..8c569203a8df 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/ReconNodeDBKeyCodec.java @@ -16,23 +16,31 @@ * limitations under the License. * */ -package org.apache.hadoop.ozone.recon.scm; -import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; -import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; +package org.apache.hadoop.ozone.recon.codec; + +import java.io.IOException; +import java.util.UUID; + +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.db.Codec; /** - * SCM db file for ozone. + * Codec for UUID. 
*/ -public class ReconDBDefinition extends SCMDBDefinition { +public class ReconNodeDBKeyCodec implements Codec { + @Override + public byte[] toPersistedFormat(UUID object) throws IOException { + return StringUtils.string2Bytes(object.toString()); + } @Override - public String getName() { - return "recon-scm.db"; + public UUID fromPersistedFormat(byte[] rawData) throws IOException { + return UUID.fromString(StringUtils.bytes2String(rawData)); } @Override - public String getLocationConfigKey() { - return ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; + public UUID copyObject(UUID object) { + return null; } } diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java similarity index 87% rename from hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java rename to hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java index b14768329cb9..0812d39a3091 100644 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,8 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.upgrade; - /** - * In-Place upgrade utility to upgrade HDDS to Ozone cluster.. - */ \ No newline at end of file + * This package defines the codecs for Recon DB tables. + */ +package org.apache.hadoop.ozone.recon.codec; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java index 72d1548d5960..dff4709f56b1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java @@ -18,16 +18,21 @@ package org.apache.hadoop.ozone.recon.scm; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FINALIZE; + import java.io.IOException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; import org.apache.hadoop.hdds.utils.db.Table; @@ -77,6 +82,7 @@ public ReconContainerManager( * @throws IOException on Error. 
*/ public void checkAndAddNewContainer(ContainerID containerID, + ContainerReplicaProto.State replicaState, DatanodeDetails datanodeDetails) throws IOException { if (!exists(containerID)) { @@ -84,15 +90,34 @@ public void checkAndAddNewContainer(ContainerID containerID, datanodeDetails.getHostName()); ContainerWithPipeline containerWithPipeline = scmClient.getContainerWithPipeline(containerID.getId()); - LOG.debug("Verified new container from SCM {} ", - containerWithPipeline.getContainerInfo().containerID()); + LOG.debug("Verified new container from SCM {}, {} ", + containerID, containerWithPipeline.getPipeline().getId()); // If no other client added this, go ahead and add this container. if (!exists(containerID)) { addNewContainer(containerID.getId(), containerWithPipeline); } + } else { + // Check if container state is not open. In SCM, container state + // changes to CLOSING first, and then the close command is pushed down + // to Datanodes. Recon 'learns' this from DN, and hence replica state + // will move container state to 'CLOSING'. + ContainerInfo containerInfo = getContainer(containerID); + if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN) + && !replicaState.equals(ContainerReplicaProto.State.OPEN) + && isHealthy(replicaState)) { + LOG.info("Container {} has state OPEN, but Replica has State {}.", + containerID, replicaState); + updateContainerState(containerID, FINALIZE); + } } } + private boolean isHealthy(ContainerReplicaProto.State replicaState) { + return replicaState != ContainerReplicaProto.State.UNHEALTHY + && replicaState != ContainerReplicaProto.State.INVALID + && replicaState != ContainerReplicaProto.State.DELETED; + } + /** * Adds a new container to Recon's container manager. * @param containerId id @@ -105,18 +130,32 @@ public void addNewContainer(long containerId, ContainerInfo containerInfo = containerWithPipeline.getContainerInfo(); getLock().lock(); try { - if (getPipelineManager().containsPipeline( - containerWithPipeline.getPipeline().getId())) { - getContainerStateManager().addContainerInfo(containerId, containerInfo, - getPipelineManager(), containerWithPipeline.getPipeline()); + boolean success = false; + if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN)) { + PipelineID pipelineID = containerWithPipeline.getPipeline().getId(); + if (getPipelineManager().containsPipeline(pipelineID)) { + getContainerStateManager().addContainerInfo(containerId, + containerInfo, getPipelineManager(), + containerWithPipeline.getPipeline()); + success = true; + } else { + // Get open container for a pipeline that Recon does not know + // about yet. Cannot update internal state until pipeline is synced. + LOG.warn(String.format( + "Pipeline %s not found. Cannot add container %s", + pipelineID, containerInfo.containerID())); + } + } else { + // Non 'Open' Container. No need to worry about pipeline since SCM + // returns a random pipelineID. + getContainerStateManager().addContainerInfo(containerId, + containerInfo, getPipelineManager(), null); + success = true; + } + if (success) { addContainerToDB(containerInfo); LOG.info("Successfully added container {} to Recon.", containerInfo.containerID()); - } else { - throw new IOException( - String.format("Pipeline %s not found. 
Cannot add container %s", - containerWithPipeline.getPipeline().getId(), - containerInfo.containerID())); } } catch (IOException ex) { LOG.info("Exception while adding container {} .", diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java index 0bf63a217021..228a65793099 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java @@ -60,10 +60,10 @@ public void onMessage(final ContainerReportFromDatanode reportFromDatanode, containerReplicaProto.getContainerID()); try { containerManager.checkAndAddNewContainer(id, + containerReplicaProto.getState(), reportFromDatanode.getDatanodeDetails()); } catch (IOException ioEx) { LOG.error("Exception while checking and adding new container.", ioEx); - return; } LOG.debug("Got container report for containerID {} ", containerReplicaProto.getContainerID()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java index a5d946e7f3db..9e8887213f7c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java @@ -50,9 +50,18 @@ public ReconIncrementalContainerReportHandler(NodeManager nodeManager, @Override public void onMessage(final IncrementalContainerReportFromDatanode report, final EventPublisher publisher) { + final DatanodeDetails dnFromReport = report.getDatanodeDetails(); if (LOG.isDebugEnabled()) { LOG.debug("Processing incremental container report from data node {}", - report.getDatanodeDetails()); + dnFromReport); + } + + DatanodeDetails dd = + getNodeManager().getNodeByUuid(dnFromReport.getUuidString()); + if (dd == null) { + LOG.warn("Received container report from unknown datanode {}", + dnFromReport); + return; } ReconContainerManager containerManager = @@ -61,11 +70,10 @@ public void onMessage(final IncrementalContainerReportFromDatanode report, for (ContainerReplicaProto replicaProto : report.getReport().getReportList()) { try { - final DatanodeDetails dd = report.getDatanodeDetails(); final ContainerID id = ContainerID.valueof( replicaProto.getContainerID()); try { - containerManager.checkAndAddNewContainer(id, + containerManager.checkAndAddNewContainer(id, replicaProto.getState(), report.getDatanodeDetails()); } catch (IOException ioEx) { LOG.error("Exception while checking and adding new container.", ioEx); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index 60e8a0635eba..d7a6104cf8b0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.recon.scm; -import java.io.File; import java.io.IOException; import java.util.HashMap; import java.util.List; @@ -26,29 +25,21 @@ import java.util.Set; import java.util.UUID; -import 
org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.util.Time; import com.google.common.collect.ImmutableSet; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_NODE_DB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,7 +51,7 @@ public class ReconNodeManager extends SCMNodeManager { public static final Logger LOG = LoggerFactory .getLogger(ReconNodeManager.class); - private final MetadataStore nodeStore; + private Table nodeDB; private final static Set ALLOWED_COMMANDS = ImmutableSet.of(reregisterCommand); @@ -73,27 +64,20 @@ public class ReconNodeManager extends SCMNodeManager { public ReconNodeManager(OzoneConfiguration conf, SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher, - NetworkTopology networkTopology) throws IOException { + NetworkTopology networkTopology, + Table nodeDB) { super(conf, scmStorageConfig, eventPublisher, networkTopology); - final File nodeDBPath = getNodeDBPath(conf); - final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - this.nodeStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setDbFile(nodeDBPath) - .setCacheSize(cacheSize * OzoneConsts.MB) - .build(); + this.nodeDB = nodeDB; loadExistingNodes(); } private void loadExistingNodes() { try { - List> range = nodeStore - .getSequentialRangeKVs(null, Integer.MAX_VALUE, null); int nodeCount = 0; - for (Map.Entry entry : range) { - DatanodeDetails datanodeDetails = DatanodeDetails.getFromProtoBuf( - HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(entry.getValue())); + TableIterator> + iterator = nodeDB.iterator(); + while (iterator.hasNext()) { + DatanodeDetails datanodeDetails = iterator.next().getValue(); register(datanodeDetails, null, null); nodeCount++; } @@ -108,27 +92,10 @@ private void loadExistingNodes() { * @param datanodeDetails Datanode details. 
*/ public void addNodeToDB(DatanodeDetails datanodeDetails) throws IOException { - byte[] nodeIdBytes = - StringUtils.string2Bytes(datanodeDetails.getUuidString()); - byte[] nodeDetailsBytes = - datanodeDetails.getProtoBufMessage().toByteArray(); - nodeStore.put(nodeIdBytes, nodeDetailsBytes); + nodeDB.put(datanodeDetails.getUuid(), datanodeDetails); LOG.info("Adding new node {} to Node DB.", datanodeDetails.getUuid()); } - protected File getNodeDBPath(ConfigurationSource conf) { - File metaDir = ReconUtils.getReconScmDbDir(conf); - return new File(metaDir, RECON_SCM_NODE_DB); - } - - @Override - public void close() throws IOException { - if (nodeStore != null) { - nodeStore.close(); - } - super.close(); - } - /** * Returns the last heartbeat time of the given node. * diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java new file mode 100644 index 000000000000..e56a66b831da --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.recon.scm; + +import java.util.UUID; + +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; +import org.apache.hadoop.ozone.recon.codec.DatanodeDetailsCodec; +import org.apache.hadoop.ozone.recon.codec.ReconNodeDBKeyCodec; + +/** + * Recon SCM db file for ozone. 
+ */ +public class ReconSCMDBDefinition extends SCMDBDefinition { + + public static final String RECON_SCM_DB_NAME = "recon-scm.db"; + + public static final DBColumnFamilyDefinition + NODES = + new DBColumnFamilyDefinition( + "nodes", + UUID.class, + new ReconNodeDBKeyCodec(), + DatanodeDetails.class, + new DatanodeDetailsCodec()); + + @Override + public String getName() { + return RECON_SCM_DB_NAME; + } + + @Override + public String getLocationConfigKey() { + return ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; + } + + @Override + public DBColumnFamilyDefinition[] getColumnFamilies() { + return ArrayUtils.add(super.getColumnFamilies(), NODES); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index 34a930a9a48e..3a0342ebc696 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -100,10 +100,11 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, this.scmStorageConfig = new ReconStorageConfig(conf); this.clusterMap = new NetworkTopologyImpl(conf); dbStore = DBStoreBuilder - .createDBStore(ozoneConfiguration, new ReconDBDefinition()); + .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition()); this.nodeManager = - new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap); + new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap, + ReconSCMDBDefinition.NODES.getTable(dbStore)); placementMetrics = SCMContainerPlacementMetrics.create(); this.containerPlacementPolicy = ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager, @@ -114,10 +115,10 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, new ReconPipelineManager(conf, nodeManager, - ReconDBDefinition.PIPELINES.getTable(dbStore), + ReconSCMDBDefinition.PIPELINES.getTable(dbStore), eventQueue); this.containerManager = new ReconContainerManager(conf, - ReconDBDefinition.CONTAINERS.getTable(dbStore), + ReconSCMDBDefinition.CONTAINERS.getTable(dbStore), dbStore, pipelineManager, scmServiceProvider, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java index ec87352ea64d..6360cf2bba0b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java @@ -19,16 +19,12 @@ package org.apache.hadoop.ozone.recon.spi.impl; import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_COUNT_KEY; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_COUNT_TABLE; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE; import static org.apache.hadoop.ozone.recon.spi.impl.ReconContainerDBProvider.getNewDBStore; -import static org.jooq.impl.DSL.currentTimestamp; -import static org.jooq.impl.DSL.select; -import static org.jooq.impl.DSL.using; +import static org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.CONTAINER_KEY; +import static org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.CONTAINER_KEY_COUNT; import 
java.io.File; import java.io.IOException; -import java.sql.Timestamp; import java.util.LinkedHashMap; import java.util.Map; @@ -41,7 +37,6 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; -import org.apache.hadoop.ozone.recon.persistence.ContainerSchemaManager; import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.Table; @@ -67,9 +62,6 @@ public class ContainerDBServiceProviderImpl private Table containerKeyCountTable; private GlobalStatsDao globalStatsDao; - @Inject - private ContainerSchemaManager containerSchemaManager; - @Inject private OzoneConfiguration configuration; @@ -79,9 +71,6 @@ public class ContainerDBServiceProviderImpl @Inject private Configuration sqlConfiguration; - @Inject - private ReconUtils reconUtils; - @Inject public ContainerDBServiceProviderImpl(DBStore dbStore, Configuration sqlConfiguration) { @@ -113,13 +102,19 @@ public void initNewContainerDB(Map throws IOException { File oldDBLocation = containerDbStore.getDbLocation(); - containerDbStore = getNewDBStore(configuration, reconUtils); + try { + containerDbStore.close(); + } catch (Exception e) { + LOG.warn("Unable to close old Recon container key DB at {}.", + containerDbStore.getDbLocation().getAbsolutePath()); + } + containerDbStore = getNewDBStore(configuration); LOG.info("Creating new Recon Container DB at {}", containerDbStore.getDbLocation().getAbsolutePath()); initializeTables(); if (oldDBLocation.exists()) { - LOG.info("Cleaning up old Recon Container DB at {}.", + LOG.info("Cleaning up old Recon Container key DB at {}.", oldDBLocation.getAbsolutePath()); FileUtils.deleteDirectory(oldDBLocation); } @@ -140,10 +135,9 @@ public void initNewContainerDB(Map */ private void initializeTables() { try { - this.containerKeyTable = containerDbStore.getTable(CONTAINER_KEY_TABLE, - ContainerKeyPrefix.class, Integer.class); - this.containerKeyCountTable = containerDbStore - .getTable(CONTAINER_KEY_COUNT_TABLE, Long.class, Long.class); + this.containerKeyTable = CONTAINER_KEY.getTable(containerDbStore); + this.containerKeyCountTable = + CONTAINER_KEY_COUNT.getTable(containerDbStore); } catch (IOException e) { LOG.error("Unable to create Container Key tables.", e); } @@ -155,7 +149,7 @@ private void initializeTables() { * * @param containerKeyPrefix the containerID, key-prefix tuple. * @param count Count of the keys matching that prefix. - * @throws IOException + * @throws IOException on failure. */ @Override public void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix, @@ -169,7 +163,7 @@ public void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix, * * @param containerID the containerID. * @param count count of the keys within the given containerID. - * @throws IOException + * @throws IOException on failure. */ @Override public void storeContainerKeyCount(Long containerID, Long count) @@ -182,7 +176,7 @@ public void storeContainerKeyCount(Long containerID, Long count) * * @param containerID the given containerID. * @return count of keys within the given containerID. - * @throws IOException + * @throws IOException on failure. */ @Override public long getKeyCountForContainer(Long containerID) throws IOException { @@ -195,7 +189,7 @@ public long getKeyCountForContainer(Long containerID) throws IOException { * * @param containerID the given containerID. 
* @return if the given ContainerID exists or not. - * @throws IOException + * @throws IOException on failure. */ @Override public boolean doesContainerExists(Long containerID) throws IOException { @@ -208,7 +202,7 @@ public boolean doesContainerExists(Long containerID) throws IOException { * * @param containerKeyPrefix the containerID, key-prefix tuple. * @return count of keys matching the containerID, key-prefix. - * @throws IOException + * @throws IOException on failure. */ @Override public Integer getCountForContainerKeyPrefix( @@ -308,7 +302,7 @@ public Map getKeyPrefixesForContainer( * @param prevContainer containerID after which the * list of containers are scanned. * @return Map of containerID -> containerMetadata. - * @throws IOException + * @throws IOException on failure. */ @Override public Map getContainers(int limit, @@ -388,20 +382,8 @@ public TableIterator getContainerTableIterator() { */ @Override public void storeContainerCount(Long count) { - // Get the current timestamp - Timestamp now = - using(sqlConfiguration).fetchValue(select(currentTimestamp())); - GlobalStats containerCountRecord = - globalStatsDao.fetchOneByKey(CONTAINER_COUNT_KEY); - GlobalStats globalStatsRecord = - new GlobalStats(CONTAINER_COUNT_KEY, count, now); - - // Insert a new record for CONTAINER_COUNT_KEY if it does not exist - if (containerCountRecord == null) { - globalStatsDao.insert(globalStatsRecord); - } else { - globalStatsDao.update(globalStatsRecord); - } + ReconUtils.upsertGlobalStatsTable(sqlConfiguration, globalStatsDao, + CONTAINER_COUNT_KEY, count); } /** diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java index d622eb357be6..ec36597a0816 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java @@ -18,20 +18,15 @@ package org.apache.hadoop.ozone.recon.spi.impl; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_COUNT_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_KEY_DB; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import java.io.File; -import java.nio.file.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.hdds.utils.db.IntegerCodec; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,10 +64,10 @@ public DBStore get() { if (lastKnownContainerKeyDb != null) { LOG.info("Last known container-key DB : {}", lastKnownContainerKeyDb.getAbsolutePath()); - dbStore = initializeDBStore(configuration, reconUtils, + dbStore = initializeDBStore(configuration, lastKnownContainerKeyDb.getName()); } else { - dbStore = getNewDBStore(configuration, reconUtils); + dbStore = getNewDBStore(configuration); } if (dbStore == null) { throw new ProvisionException("Unable to provide instance of DBStore " + @@ -82,28 +77,19 @@ public DBStore get() { } private static DBStore initializeDBStore(OzoneConfiguration configuration, - ReconUtils reconUtils, 
String dbName) { + String dbName) { DBStore dbStore = null; try { - Path metaDir = reconUtils.getReconDbDir( - configuration, OZONE_RECON_DB_DIR).toPath(); - dbStore = DBStoreBuilder.newBuilder(configuration) - .setPath(metaDir) - .setName(dbName) - .addTable(CONTAINER_KEY_TABLE) - .addTable(CONTAINER_KEY_COUNT_TABLE) - .addCodec(ContainerKeyPrefix.class, new ContainerKeyPrefixCodec()) - .addCodec(Integer.class, new IntegerCodec()) - .build(); + dbStore = DBStoreBuilder.createDBStore(configuration, + new ReconDBDefinition(dbName)); } catch (Exception ex) { LOG.error("Unable to initialize Recon container metadata store.", ex); } return dbStore; } - static DBStore getNewDBStore(OzoneConfiguration configuration, - ReconUtils reconUtils) { + static DBStore getNewDBStore(OzoneConfiguration configuration) { String dbName = RECON_CONTAINER_KEY_DB + "_" + System.currentTimeMillis(); - return initializeDBStore(configuration, reconUtils, dbName); + return initializeDBStore(configuration, dbName); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java new file mode 100644 index 000000000000..4f5a4c79e267 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.recon.spi.impl; + +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.IntegerCodec; +import org.apache.hadoop.hdds.utils.db.LongCodec; +import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; +import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; + +/** + * RocksDB definition for the DB internal to Recon. 
+ */ +public class ReconDBDefinition implements DBDefinition { + + private String dbName; + + public ReconDBDefinition(String dbName) { + this.dbName = dbName; + } + + public static final DBColumnFamilyDefinition + CONTAINER_KEY = + new DBColumnFamilyDefinition<>( + "containerKeyTable", + ContainerKeyPrefix.class, + new ContainerKeyPrefixCodec(), + Integer.class, + new IntegerCodec()); + + public static final DBColumnFamilyDefinition + CONTAINER_KEY_COUNT = + new DBColumnFamilyDefinition<>( + "containerKeyCountTable", + Long.class, + new LongCodec(), + Long.class, + new LongCodec()); + + @Override + public String getName() { + return dbName; + } + + @Override + public String getLocationConfigKey() { + return ReconServerConfigKeys.OZONE_RECON_DB_DIR; + } + + @Override + public DBColumnFamilyDefinition[] getColumnFamilies() { + return new DBColumnFamilyDefinition[] {CONTAINER_KEY, CONTAINER_KEY_COUNT}; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java index 80b25265adeb..7092c548d949 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java @@ -243,8 +243,8 @@ private void handleDeleteKeyEvent(String key, OmKeyInfo omKeyInfo, Map fileSizeCountMap) { if (omKeyInfo == null) { - LOG.warn("Unexpected error while handling DELETE key event. Key not " + - "found in Recon OM DB : {}", key); + LOG.warn("Deleting a key not found while handling DELETE key event. Key" + + " not found in Recon OM DB : {}", key); } else { FileSizeCountKey countKey = getFileSizeCountKey(omKeyInfo); Long count = fileSizeCountMap.containsKey(countKey) ? diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java index 949439cc8b31..f32b04ea67bc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java @@ -22,7 +22,7 @@ /** * A class used to encapsulate a single OM DB update event. - * Currently only PUT and DELETE are supported. + * Currently PUT, DELETE and UPDATE are supported. * @param Type of Key. * @param Type of Value. 
*/ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java index 82e1ae8b9f32..34c4c332c37f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java @@ -18,9 +18,6 @@ package org.apache.hadoop.ozone.recon.tasks; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; @@ -29,13 +26,12 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.codec.OMDBDefinition; import org.apache.hadoop.hdds.utils.db.CodecRegistry; -import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; import org.rocksdb.RocksDBException; import org.rocksdb.WriteBatch; import org.slf4j.Logger; @@ -53,16 +49,17 @@ public class OMDBUpdatesHandler extends WriteBatch.Handler { private CodecRegistry codecRegistry; private OMMetadataManager omMetadataManager; private List omdbUpdateEvents = new ArrayList<>(); + private OMDBDefinition omdbDefinition; public OMDBUpdatesHandler(OMMetadataManager metadataManager) { omMetadataManager = metadataManager; tablesNames = metadataManager.getStore().getTableNames(); codecRegistry = metadataManager.getStore().getCodecRegistry(); + omdbDefinition = new OMDBDefinition(); } @Override - public void put(int cfIndex, byte[] keyBytes, byte[] valueBytes) throws - RocksDBException { + public void put(int cfIndex, byte[] keyBytes, byte[] valueBytes) { try { processEvent(cfIndex, keyBytes, valueBytes, OMDBUpdateEvent.OMDBUpdateAction.PUT); @@ -72,7 +69,7 @@ public void put(int cfIndex, byte[] keyBytes, byte[] valueBytes) throws } @Override - public void delete(int cfIndex, byte[] keyBytes) throws RocksDBException { + public void delete(int cfIndex, byte[] keyBytes) { try { processEvent(cfIndex, keyBytes, null, OMDBUpdateEvent.OMDBUpdateAction.DELETE); @@ -93,41 +90,44 @@ private void processEvent(int cfIndex, byte[] keyBytes, byte[] valueBytes, OMDBUpdateEvent.OMDBUpdateAction action) throws IOException { String tableName = tablesNames.get(cfIndex); - Class keyType = getKeyType(); - Class valueType = getValueType(tableName); - if (valueType != null) { + Optional keyType = omdbDefinition.getKeyType(tableName); + Optional valueType = omdbDefinition.getValueType(tableName); + if (keyType.isPresent() && valueType.isPresent()) { OMDBUpdateEvent.OMUpdateEventBuilder builder = new OMDBUpdateEvent.OMUpdateEventBuilder<>(); builder.setTable(tableName); builder.setAction(action); - - String key = codecRegistry.asObject(keyBytes, keyType); + String key = (String) codecRegistry.asObject(keyBytes, keyType.get()); 
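+    // The decoded key is used both for the event being built and for the
+    // getSkipCache lookup of the table's previous value below.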
builder.setKey(key); + // Put new + // Put existing --> Update + // Delete existing + // Delete non-existing + Table table = omMetadataManager.getTable(tableName); + // Recon does not add entries to cache and it is safer to always use + // getSkipCache in Recon. + Object oldValue = table.getSkipCache(key); if (action == PUT) { - Object value = codecRegistry.asObject(valueBytes, valueType); + Object value = codecRegistry.asObject(valueBytes, valueType.get()); builder.setValue(value); - // If a PUT key operation happens on an existing Key, it is tagged + // If a PUT operation happens on an existing Key, it is tagged // as an "UPDATE" event. - if (tableName.equalsIgnoreCase(KEY_TABLE)) { - if (omMetadataManager.getKeyTable().isExist(key)) { - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(key); - builder.setOldValue(omKeyInfo); - builder.setAction(UPDATE); - } + if (oldValue != null) { + builder.setOldValue(oldValue); + builder.setAction(UPDATE); } } else if (action.equals(DELETE)) { - // When you delete a Key, we add the old OmKeyInfo to the event so that + // When you delete a Key, we add the old value to the event so that // a downstream task can use it. - if (tableName.equalsIgnoreCase(KEY_TABLE)) { - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(key); - builder.setValue(omKeyInfo); - } + builder.setValue(oldValue); } OMDBUpdateEvent event = builder.build(); - LOG.debug("Generated OM update Event for table : " + event.getTable() - + ", Key = " + event.getKey() + ", action = " + event.getAction()); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Generated OM update Event for table : %s, " + + "action = %s", tableName, action)); + } if (omdbUpdateEvents.contains(event)) { // If the same event is part of this batch, the last one only holds. // For example, if there are 2 PUT key1 events, then the first one @@ -135,6 +135,13 @@ private void processEvent(int cfIndex, byte[] keyBytes, byte[] omdbUpdateEvents.remove(event); } omdbUpdateEvents.add(event); + } else { + // key type or value type cannot be determined for this table. + // log a warn message and ignore the update. + if (LOG.isWarnEnabled()) { + LOG.warn(String.format("KeyType or ValueType could not be determined" + + " for table %s. Ignoring the event.", tableName)); + } } } @@ -261,30 +268,6 @@ public void markCommit(byte[] bytes) throws RocksDBException { */ } - /** - * Return Key type class for a given table name. - * @param name table name. - * @return String.class by default. - */ - private Class getKeyType() { - return String.class; - } - - /** - * Return Value type class for a given table. - * @param name table name - * @return Value type based on table name. - */ - @VisibleForTesting - protected Class getValueType(String name) { - switch (name) { - case KEY_TABLE : return OmKeyInfo.class; - case VOLUME_TABLE : return OmVolumeArgs.class; - case BUCKET_TABLE : return OmBucketInfo.class; - default: return null; - } - } - /** * Get List of events. * @return List of events. 
@@ -292,5 +275,4 @@ protected Class getValueType(String name) { public List getEvents() { return omdbUpdateEvents; } - } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java index 240bf3813182..440985365af8 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java @@ -94,7 +94,7 @@ public void registerTask(ReconOmTask task) { * For every registered task, we try process step twice and then reprocess * once (if process failed twice) to absorb the events. If a task has failed * reprocess call more than 2 times across events, it is unregistered - * (blacklisted). + * (ignored). * @param events set of events * @throws InterruptedException */ @@ -140,7 +140,7 @@ public synchronized void consumeOMEvents(OMUpdateEventBatch events, results = executorService.invokeAll(tasks); List reprocessFailedTasks = processTaskResults(results, events); - blacklistFailedTasks(reprocessFailedTasks); + ignoreFailedTasks(reprocessFailedTasks); } } } catch (ExecutionException e) { @@ -149,15 +149,15 @@ public synchronized void consumeOMEvents(OMUpdateEventBatch events, } /** - * Blacklist tasks that failed reprocess step more than threshold times. + * Ignore tasks that failed reprocess step more than threshold times. * @param failedTasks list of failed tasks. */ - private void blacklistFailedTasks(List failedTasks) { + private void ignoreFailedTasks(List failedTasks) { for (String taskName : failedTasks) { LOG.info("Reprocess step failed for task {}.", taskName); if (taskFailureCounter.get(taskName).incrementAndGet() > TASK_FAILURE_THRESHOLD) { - LOG.info("Blacklisting Task since it failed retry and " + + LOG.info("Ignoring task since it failed retry and " + "reprocess more than {} times.", TASK_FAILURE_THRESHOLD); reconOmTasks.remove(taskName); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java new file mode 100644 index 000000000000..262152983c63 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import com.google.inject.Inject; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.recon.ReconUtils; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; +import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; +import org.jooq.Configuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map.Entry; + +/** + * Class to iterate over the OM DB and store the total counts of volumes, + * buckets, keys, open keys, deleted keys, etc. + */ +public class TableCountTask implements ReconOmTask { + private static final Logger LOG = + LoggerFactory.getLogger(TableCountTask.class); + + private GlobalStatsDao globalStatsDao; + private Configuration sqlConfiguration; + private ReconOMMetadataManager reconOMMetadataManager; + + @Inject + public TableCountTask(GlobalStatsDao globalStatsDao, + Configuration sqlConfiguration, + ReconOMMetadataManager reconOMMetadataManager) { + this.globalStatsDao = globalStatsDao; + this.sqlConfiguration = sqlConfiguration; + this.reconOMMetadataManager = reconOMMetadataManager; + } + + /** + * Iterate the rows of each table in OM snapshot DB and calculate the + * counts for each table. + * + * @param omMetadataManager OM Metadata instance. + * @return Pair + */ + @Override + public Pair reprocess(OMMetadataManager omMetadataManager) { + for (String tableName : getTaskTables()) { + Table table = omMetadataManager.getTable(tableName); + try (TableIterator keyIter = table.iterator()) { + long count = getCount(keyIter); + ReconUtils.upsertGlobalStatsTable(sqlConfiguration, globalStatsDao, + getRowKeyFromTable(tableName), + count); + } catch (IOException ioEx) { + LOG.error("Unable to populate Table Count in Recon DB.", ioEx); + return new ImmutablePair<>(getTaskName(), false); + } + } + LOG.info("Completed a 'reprocess' run of TableCountTask."); + return new ImmutablePair<>(getTaskName(), true); + } + + private long getCount(Iterator iterator) { + long count = 0L; + while (iterator.hasNext()) { + count++; + iterator.next(); + } + return count; + } + + @Override + public String getTaskName() { + return "TableCountTask"; + } + + @Override + public Collection getTaskTables() { + return new ArrayList<>(reconOMMetadataManager.listTableNames()); + } + + /** + * Read the update events and update the count of respective object + * (volume, bucket, key etc.) based on the action (put or delete). + * + * @param events Update events - PUT, DELETE and UPDATE. 
+ * @return Pair + */ + @Override + public Pair process(OMUpdateEventBatch events) { + Iterator eventIterator = events.getIterator(); + + HashMap objectCountMap = initializeCountMap(); + + while (eventIterator.hasNext()) { + OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); + String rowKey = getRowKeyFromTable(omdbUpdateEvent.getTable()); + try{ + switch (omdbUpdateEvent.getAction()) { + case PUT: + objectCountMap.computeIfPresent(rowKey, (k, count) -> count + 1L); + break; + + case DELETE: + // if value is null, it means that the volume / bucket / key + // is already deleted and does not exist in the OM database anymore. + if (omdbUpdateEvent.getValue() != null) { + String key = getRowKeyFromTable(omdbUpdateEvent.getTable()); + objectCountMap.computeIfPresent(key, + (k, count) -> count > 0 ? count - 1L : 0L); + } + break; + + default: LOG.trace("Skipping DB update event : Table: {}, Action: {}", + omdbUpdateEvent.getTable(), omdbUpdateEvent.getAction()); + } + } catch (Exception e) { + LOG.error("Unexpected exception while processing the table {}, " + + "Action: {}", omdbUpdateEvent.getTable(), + omdbUpdateEvent.getAction(), e); + return new ImmutablePair<>(getTaskName(), false); + } + } + for (Entry entry: objectCountMap.entrySet()) { + ReconUtils.upsertGlobalStatsTable(sqlConfiguration, globalStatsDao, + entry.getKey(), + entry.getValue()); + } + + LOG.info("Completed a 'process' run of TableCountTask."); + return new ImmutablePair<>(getTaskName(), true); + } + + private HashMap initializeCountMap() { + Collection tables = getTaskTables(); + HashMap objectCountMap = new HashMap<>(tables.size()); + for (String tableName: tables) { + String key = getRowKeyFromTable(tableName); + objectCountMap.put(key, getCountForKey(key)); + } + return objectCountMap; + } + + public static String getRowKeyFromTable(String tableName) { + return tableName + "Count"; + } + + /** + * Get the count stored for the given key from Global Stats table. + * Return 0 if record not found. + * + * @param key Key in the global stats table + * @return count + */ + private long getCountForKey(String key) { + GlobalStats record = globalStatsDao.fetchOneByKey(key); + + return (record == null) ? 
0L : record.getValue(); + } +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 82fae3735b12..8d61333b087b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -41,7 +41,11 @@ } ], "containers": 80, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574728775759, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost2.storage.enterprise.com", @@ -68,7 +72,11 @@ } ], "containers": 8192, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724805059, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost3.storage.enterprise.com", @@ -101,7 +109,11 @@ } ], "containers": 43, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1343544679543, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost4.storage.enterprise.com", @@ -115,7 +127,11 @@ }, "pipelines": [], "containers": 0, - "leaderCount": 0 + "leaderCount": 0, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1074724802059, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost5.storage.enterprise.com", @@ -142,7 +158,11 @@ } ], "containers": 643, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724816029, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost6.storage.enterprise.com", @@ -169,7 +189,11 @@ } ], "containers": 5, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724802059, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost7.storage.enterprise.com", @@ -202,7 +226,11 @@ } ], "containers": 64, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724676009, + "revision": "aaf470000cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-19T13:45Z" }, { "hostname": "localhost8.storage.enterprise.com", @@ -229,7 +257,11 @@ } ], "containers": 21, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724276050, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost9.storage.enterprise.com", @@ -256,7 +288,11 @@ } ], "containers": 897, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724573011, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost10.storage.enterprise.com", @@ -289,7 +325,11 @@ } ], "containers": 6754, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574723756059, + "revision": "caf471111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T15:45Z" }, { "hostname": "localhost11.storage.enterprise.com", @@ -316,7 +356,11 @@ } ], "containers": 78, - "leaderCount": 2 + "leaderCount": 2, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1474724705783, + "revision": "ace991111cdb9168ec013f4526bb997aa513e079", + 
"buildDate": "2020-07-20T10:45Z" }, { "hostname": "localhost12.storage.enterprise.com", @@ -343,7 +387,11 @@ } ], "containers": 543, - "leaderCount": 1 + "leaderCount": 1, + "version": "0.6.0-SNAPSHOT", + "setupTime": 1574724706232, + "revision": "ace991111cdb9168ec013f4526bb997aa513e079", + "buildDate": "2020-07-20T10:45Z" } ] }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx index fee9a883a0dc..773828dcd0ed 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx @@ -42,9 +42,9 @@ class AutoReloadPanel extends React.Component { const lastUpdatedText = lastUpdated === 0 ? 'NA' : ( - {moment(lastUpdated).format('LT')} + {moment(lastUpdated).format('LTS')} ); return ( diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/multiSelect/multiSelect.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/multiSelect/multiSelect.tsx index 19005dddd11e..417c2efdcb7a 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/multiSelect/multiSelect.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/multiSelect/multiSelect.tsx @@ -36,6 +36,7 @@ interface IMultiSelectProps extends ReactSelectProps { options: IOption[]; allowSelectAll: boolean; allOption?: IOption; + maxShowValues?: number; } const defaultProps = { @@ -48,7 +49,7 @@ const defaultProps = { export class MultiSelect extends PureComponent { static defaultProps = defaultProps; render() { - const {allowSelectAll, allOption, options, onChange} = this.props; + const {allowSelectAll, allOption, options, maxShowValues = 5, onChange} = this.props; if (allowSelectAll) { const Option = (props: OptionProps) => { return ( @@ -70,7 +71,7 @@ export class MultiSelect extends PureComponent { let toBeRendered = children; if (currentValues.some(val => val.value === allOption!.value) && children) { toBeRendered = allOption!.label; - } else if (currentValues.length >= 5) { + } else if (currentValues.length > maxShowValues) { toBeRendered = `${currentValues.length} selected`; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx index ba8336b678c3..e9cb16820814 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx @@ -16,7 +16,9 @@ * limitations under the License. 
*/ -export type DatanodeStatus = 'HEALTHY' | 'STALE' | 'DEAD' | 'DECOMMISSIONING' | 'DECOMMISSIONED'; +export const DatanodeStatusList = ['HEALTHY', 'STALE', 'DEAD', 'DECOMMISSIONING', 'DECOMMISSIONED'] as const; +type DatanodeStatusTuple = typeof DatanodeStatusList; +export type DatanodeStatus = DatanodeStatusTuple[number]; // 'HEALTHY' | 'STALE' | 'DEAD' | 'DECOMMISSIONING' | 'DECOMMISSIONED'; export interface IStorageReport { capacity: number; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.less new file mode 100644 index 000000000000..4c7013a1304d --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.less @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +.column-search-container { + padding: 8px; + + .input-block { + width: 188px; + margin-bottom: 8px; + display: block; + } + + .search-button { + width: 90px; + margin-right: 8px; + } + + .reset-button { + width: 90px; + } +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.tsx new file mode 100644 index 000000000000..319bfd29e013 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/columnSearch.tsx @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React from 'react'; +import {Input, Button, Icon} from 'antd'; +import './columnSearch.less'; + +class ColumnSearch extends React.PureComponent { + searchInput: Input | null = null; + + getColumnSearchProps = (dataIndex: string) => ({ + filterDropdown: ({ + setSelectedKeys, + selectedKeys, + confirm, + clearFilters + }: { + setSelectedKeys: (keys: string[]) => void; + selectedKeys: string[]; + confirm: () => void; + clearFilters: () => void; + }) => ( +

+ { + this.searchInput = node; + }} + className='input-block' + placeholder={`Search ${dataIndex}`} + value={selectedKeys[0]} + onChange={e => + setSelectedKeys(e.target.value ? [e.target.value] : [])} + onPressEnter={() => this.handleSearch(confirm)} + /> + + +
+ ), + filterIcon: (filtered: boolean) => ( + + ), + onFilter: (value: string, record: any) => + record[dataIndex].toString().toLowerCase().includes(value.toLowerCase()), + onFilterDropdownVisibleChange: (visible: boolean) => { + if (visible) { + setTimeout(() => { + if (this.searchInput) { + this.searchInput.select(); + } + }); + } + } + }); + + handleSearch = (confirm: () => void) => { + confirm(); + }; + + handleReset = (clearFilters: () => void) => { + clearFilters(); + }; +} + +export {ColumnSearch}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less index 4a3cdf5accc9..644437dc8776 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.less @@ -22,4 +22,19 @@ margin-bottom: 5px; } } + + .filter-block { + font-size: 14px; + font-weight: normal; + display: inline-block; + margin-left: 20px; + } + + .multi-select-container { + padding-left: 5px; + margin-right: 5px; + display: inline-block; + min-width: 200px; + z-index: 99; + } } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index bfba82a5bebb..91b6a45aa99d 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -23,11 +23,14 @@ import {PaginationConfig} from 'antd/lib/pagination'; import moment from 'moment'; import {ReplicationIcon} from 'utils/themeIcons'; import StorageBar from 'components/storageBar/storageBar'; -import {DatanodeStatus, IStorageReport} from 'types/datanode.types'; +import {DatanodeStatus, DatanodeStatusList, IStorageReport} from 'types/datanode.types'; import './datanodes.less'; import {AutoReloadHelper} from 'utils/autoReloadHelper'; import AutoReloadPanel from 'components/autoReloadPanel/autoReloadPanel'; +import {MultiSelect, IOption} from 'components/multiSelect/multiSelect'; +import {ActionMeta, ValueType} from 'react-select'; import {showDataFetchError} from 'utils/common'; +import {ColumnSearch} from 'utils/columnSearch'; interface IDatanodeResponse { hostname: string; @@ -38,6 +41,10 @@ interface IDatanodeResponse { containers: number; leaderCount: number; uuid: string; + version: string; + setupTime: number; + revision: string; + buildDate: string; } interface IDatanodesResponse { @@ -56,6 +63,10 @@ interface IDatanode { containers: number; leaderCount: number; uuid: string; + version: string; + setupTime: number; + revision: string; + buildDate: string; } interface IPipeline { @@ -70,6 +81,8 @@ interface IDatanodesState { dataSource: IDatanode[]; totalCount: number; lastUpdated: number; + selectedColumns: IOption[]; + columnOptions: IOption[]; } const renderDatanodeStatus = (status: DatanodeStatus) => { @@ -89,27 +102,38 @@ const COLUMNS = [ title: 'Status', dataIndex: 'state', key: 'state', + isVisible: true, + filterMultiple: true, + filters: DatanodeStatusList.map(status => ({text: status, value: status})), + onFilter: (value: DatanodeStatus, record: IDatanode) => record.state === value, render: (text: DatanodeStatus) => 
renderDatanodeStatus(text), - sorter: (a: IDatanode, b: IDatanode) => a.state.localeCompare(b.state) - }, - { - title: 'Uuid', - dataIndex: 'uuid', - key: 'uuid', - sorter: (a: IDatanode, b: IDatanode) => a.uuid.localeCompare(b.uuid), - defaultSortOrder: 'ascend' as const + sorter: (a: IDatanode, b: IDatanode) => a.state.localeCompare(b.state), + fixed: 'left' }, { title: 'Hostname', dataIndex: 'hostname', key: 'hostname', + isVisible: true, + isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.hostname.localeCompare(b.hostname), + defaultSortOrder: 'ascend' as const, + fixed: 'left' + }, + { + title: 'Uuid', + dataIndex: 'uuid', + key: 'uuid', + isVisible: true, + isSearchable: true, + sorter: (a: IDatanode, b: IDatanode) => a.uuid.localeCompare(b.uuid), defaultSortOrder: 'ascend' as const }, { title: 'Storage Capacity', dataIndex: 'storageUsed', key: 'storageUsed', + isVisible: true, sorter: (a: IDatanode, b: IDatanode) => a.storageRemaining - b.storageRemaining, render: (text: string, record: IDatanode) => ( a.lastHeartbeat - b.lastHeartbeat, render: (heartbeat: number) => { - return heartbeat > 0 ? moment(heartbeat).format('lll') : 'NA'; + return heartbeat > 0 ? moment(heartbeat).format('ll LTS') : 'NA'; } }, { title: 'Pipeline ID(s)', dataIndex: 'pipelines', key: 'pipelines', + isVisible: true, render: (pipelines: IPipeline[], record: IDatanode) => { return (
@@ -158,16 +184,67 @@ const COLUMNS = [ , dataIndex: 'leaderCount', key: 'leaderCount', + isVisible: true, + isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.leaderCount - b.leaderCount }, { title: 'Containers', dataIndex: 'containers', key: 'containers', + isVisible: true, + isSearchable: true, sorter: (a: IDatanode, b: IDatanode) => a.containers - b.containers + }, + { + title: 'Version', + dataIndex: 'version', + key: 'version', + isVisible: false, + isSearchable: true, + sorter: (a: IDatanode, b: IDatanode) => a.version.localeCompare(b.version), + defaultSortOrder: 'ascend' as const + }, + { + title: 'SetupTime', + dataIndex: 'setupTime', + key: 'setupTime', + isVisible: false, + sorter: (a: IDatanode, b: IDatanode) => a.setupTime - b.setupTime, + render: (uptime: number) => { + return uptime > 0 ? moment(uptime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Revision', + dataIndex: 'revision', + key: 'revision', + isVisible: false, + isSearchable: true, + sorter: (a: IDatanode, b: IDatanode) => a.revision.localeCompare(b.revision), + defaultSortOrder: 'ascend' as const + }, + { + title: 'BuildDate', + dataIndex: 'buildDate', + key: 'buildDate', + isVisible: false, + isSearchable: true, + sorter: (a: IDatanode, b: IDatanode) => a.buildDate.localeCompare(b.buildDate), + defaultSortOrder: 'ascend' as const } ]; +const allColumnsOption: IOption = { + label: 'Select all', + value: '*' +}; + +const defaultColumns: IOption[] = COLUMNS.map(column => ({ + label: column.key, + value: column.key +})); + export class Datanodes extends React.Component, IDatanodesState> { autoReload: AutoReloadHelper; @@ -177,15 +254,33 @@ export class Datanodes extends React.Component, IDatanode loading: false, dataSource: [], totalCount: 0, - lastUpdated: 0 + lastUpdated: 0, + selectedColumns: [], + columnOptions: defaultColumns }; this.autoReload = new AutoReloadHelper(this._loadData); } - _loadData = () => { + _handleColumnChange = (selected: ValueType, _action: ActionMeta) => { + const selectedColumns = (selected as IOption[]); this.setState({ - loading: true + selectedColumns }); + }; + + _getSelectedColumns = (selected: IOption[]) => { + const selectedColumns = selected.length > 0 ? 
selected : COLUMNS.filter(column => column.isVisible).map(column => ({ + label: column.key, + value: column.key + })); + return selectedColumns; + }; + + _loadData = () => { + this.setState(prevState => ({ + loading: true, + selectedColumns: this._getSelectedColumns(prevState.selectedColumns) + })); axios.get('/api/v1/datanodes').then(response => { const datanodesResponse: IDatanodesResponse = response.data; const totalCount = datanodesResponse.totalCount; @@ -201,9 +296,14 @@ export class Datanodes extends React.Component, IDatanode storageRemaining: datanode.storageReport.remaining, pipelines: datanode.pipelines, containers: datanode.containers, - leaderCount: datanode.leaderCount + leaderCount: datanode.leaderCount, + version: datanode.version, + setupTime: datanode.setupTime, + revision: datanode.revision, + buildDate: datanode.buildDate }; }); + this.setState({ loading: false, dataSource, @@ -233,7 +333,7 @@ export class Datanodes extends React.Component, IDatanode }; render() { - const {dataSource, loading, totalCount, lastUpdated} = this.state; + const {dataSource, loading, totalCount, lastUpdated, selectedColumns, columnOptions} = this.state; const paginationConfig: PaginationConfig = { showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} datanodes`, showSizeChanger: true, @@ -243,10 +343,51 @@ export class Datanodes extends React.Component, IDatanode
Datanodes ({totalCount}) - +
+ Columns +
+
+
- +
((filtered, column) => { + if (selectedColumns.some(e => e.value === column.key)) { + if (column.isSearchable) { + const newColumn = { + ...column, + ...new ColumnSearch(column).getColumnSearchProps(column.dataIndex) + }; + filtered.push(newColumn); + } else { + filtered.push(column); + } + } + + return filtered; + }, [])} + loading={loading} + pagination={paginationConfig} + rowKey='hostname' + scroll={{x: true, y: false, scrollToFirstRowOnChange: true}} + /> ); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx index 15b6858bbc4a..342a8bd35f3f 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.tsx @@ -28,9 +28,12 @@ import {AutoReloadHelper} from 'utils/autoReloadHelper'; import AutoReloadPanel from 'components/autoReloadPanel/autoReloadPanel'; import {showDataFetchError} from 'utils/common'; import {IAxiosResponse} from 'types/axios.types'; +import {ColumnSearch} from 'utils/columnSearch'; const {TabPane} = Tabs; -export type PipelineStatus = 'active' | 'inactive'; +const PipelineStatusList = ['OPEN', 'CLOSING', 'QUASI_CLOSED', 'CLOSED', 'UNHEALTHY', 'INVALID', 'DELETED'] as const; +type PipelineStatusTuple = typeof PipelineStatusList; +export type PipelineStatus = PipelineStatusTuple[number]; // 'OPEN' | 'CLOSING' | 'QUASI_CLOSED' | 'CLOSED' | 'UNHEALTHY' | 'INVALID' | 'DELETED'; interface IPipelineResponse { pipelineId: string; @@ -62,6 +65,7 @@ const COLUMNS = [ title: 'Pipeline ID', dataIndex: 'pipelineId', key: 'pipelineId', + isSearchable: true, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.pipelineId.localeCompare(b.pipelineId) }, { @@ -89,24 +93,30 @@ const COLUMNS = [ title: 'Status', dataIndex: 'status', key: 'status', + filterMultiple: true, + filters: PipelineStatusList.map(status => ({text: status, value: status})), + onFilter: (value: PipelineStatus, record: IPipelineResponse) => record.status === value, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.status.localeCompare(b.status) }, { title: 'Containers', dataIndex: 'containers', key: 'containers', + isSearchable: true, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.containers - b.containers }, { title: 'Datanodes', dataIndex: 'datanodes', key: 'datanodes', + isSearchable: true, render: (datanodes: string[]) =>
{datanodes.map(datanode =>
{datanode}
)}
}, { title: 'Leader', dataIndex: 'leaderNode', key: 'leaderNode', + isSearchable: true, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.leaderNode.localeCompare(b.leaderNode) }, { @@ -114,7 +124,7 @@ const COLUMNS = [ dataIndex: 'lastLeaderElection', key: 'lastLeaderElection', render: (lastLeaderElection: number) => lastLeaderElection > 0 ? - moment(lastLeaderElection).format('lll') : 'NA', + moment(lastLeaderElection).format('ll LTS') : 'NA', sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.lastLeaderElection - b.lastLeaderElection }, { @@ -128,6 +138,7 @@ const COLUMNS = [ title: 'No. of Elections', dataIndex: 'leaderElections', key: 'leaderElections', + isSearchable: true, sorter: (a: IPipelineResponse, b: IPipelineResponse) => a.leaderElections - b.leaderElections } ]; @@ -205,7 +216,22 @@ export class Pipelines extends React.Component, IPipeline
-
+
((filtered, column) => { + if (column.isSearchable) { + const newColumn = { + ...column, + ...new ColumnSearch(column).getColumnSearchProps(column.dataIndex) + }; + filtered.push(newColumn); + } else { + filtered.push(column); + } + + return filtered; + }, [])} + loading={activeLoading} pagination={paginationConfig} rowKey='pipelineId'/> diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index a592119cdb55..f1350a9ebf7d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -59,10 +59,14 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask; +import org.apache.hadoop.ozone.recon.tasks.TableCountTask; import org.apache.hadoop.test.LambdaTestUtils; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao; +import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize; +import org.jooq.Configuration; +import org.jooq.DSLContext; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -74,6 +78,7 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm; +import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.mockito.BDDMockito.given; @@ -97,6 +102,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private UtilizationEndpoint utilizationEndpoint; private ReconOMMetadataManager reconOMMetadataManager; private FileSizeCountTask fileSizeCountTask; + private TableCountTask tableCountTask; private ReconStorageContainerManagerFacade reconScm; private boolean isSetupDone = false; private String pipelineId; @@ -107,6 +113,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private DatanodeDetailsProto datanodeDetailsProto; private Pipeline pipeline; private FileCountBySizeDao fileCountBySizeDao; + private DSLContext dslContext; private final String host1 = "host1.datanode"; private final String host2 = "host2.datanode"; private final String ip1 = "1.1.1.1"; @@ -166,17 +173,23 @@ private void initializeInjector() throws IOException { nodeEndpoint = reconTestInjector.getInstance(NodeEndpoint.class); pipelineEndpoint = reconTestInjector.getInstance(PipelineEndpoint.class); - clusterStateEndpoint = - reconTestInjector.getInstance(ClusterStateEndpoint.class); fileCountBySizeDao = getDao(FileCountBySizeDao.class); + GlobalStatsDao globalStatsDao = getDao(GlobalStatsDao.class); UtilizationSchemaDefinition utilizationSchemaDefinition = getSchemaDefinition(UtilizationSchemaDefinition.class); + Configuration sqlConfiguration = + reconTestInjector.getInstance(Configuration.class); utilizationEndpoint = new UtilizationEndpoint( fileCountBySizeDao, utilizationSchemaDefinition); fileSizeCountTask = new 
FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition); + tableCountTask = new TableCountTask( + globalStatsDao, sqlConfiguration, reconOMMetadataManager); reconScm = (ReconStorageContainerManagerFacade) reconTestInjector.getInstance(OzoneStorageContainerManager.class); + clusterStateEndpoint = + new ClusterStateEndpoint(reconScm, globalStatsDao); + dslContext = getDslContext(); } @Before @@ -305,6 +318,9 @@ public void setUp() throws Exception { // key = key_three writeDataToOm(reconOMMetadataManager, "key_three"); + + // Truncate global stats table before running each test + dslContext.truncate(GLOBAL_STATS); } private void testDatanodeResponse(DatanodeMetadata datanodeMetadata) @@ -415,9 +431,9 @@ public void testGetClusterState() throws Exception { (ClusterStateResponse) response.getEntity(); Assert.assertEquals(1, clusterStateResponse.getPipelines()); - Assert.assertEquals(2, clusterStateResponse.getVolumes()); - Assert.assertEquals(2, clusterStateResponse.getBuckets()); - Assert.assertEquals(3, clusterStateResponse.getKeys()); + Assert.assertEquals(0, clusterStateResponse.getVolumes()); + Assert.assertEquals(0, clusterStateResponse.getBuckets()); + Assert.assertEquals(0, clusterStateResponse.getKeys()); Assert.assertEquals(2, clusterStateResponse.getTotalDatanodes()); Assert.assertEquals(2, clusterStateResponse.getHealthyDatanodes()); @@ -427,6 +443,16 @@ public void testGetClusterState() throws Exception { (ClusterStateResponse) response1.getEntity(); return (clusterStateResponse1.getContainers() == 1); }); + + // check volume, bucket and key count after running table count task + Pair result = + tableCountTask.reprocess(reconOMMetadataManager); + assertTrue(result.getRight()); + response = clusterStateEndpoint.getClusterState(); + clusterStateResponse = (ClusterStateResponse) response.getEntity(); + Assert.assertEquals(2, clusterStateResponse.getVolumes()); + Assert.assertEquals(2, clusterStateResponse.getBuckets()); + Assert.assertEquals(3, clusterStateResponse.getKeys()); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java index 04010e512f3d..783f42ca3929 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -33,6 +34,7 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.recon.persistence.ContainerSchemaManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; @@ -40,6 +42,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; +import static 
org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.CONTAINERS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; import org.junit.After; @@ -69,17 +72,17 @@ public void setUp() throws Exception { conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().getAbsolutePath()); conf.set(OZONE_SCM_NAMES, "localhost"); - store = DBStoreBuilder.createDBStore(conf, new ReconDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); scmStorageConfig = new ReconStorageConfig(conf); NetworkTopology clusterMap = new NetworkTopologyImpl(conf); EventQueue eventQueue = new EventQueue(); NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap); pipelineManager = new ReconPipelineManager(conf, nodeManager, - ReconDBDefinition.PIPELINES.getTable(store), eventQueue); + ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue); containerManager = new ReconContainerManager( conf, - ReconDBDefinition.CONTAINERS.getTable(store), + ReconSCMDBDefinition.CONTAINERS.getTable(store), store, pipelineManager, getScmServiceProvider(), @@ -129,4 +132,46 @@ private StorageContainerServiceProvider getScmServiceProvider() .thenReturn(containerWithPipeline); return scmServiceProviderMock; } + + protected Table getContainerTable() + throws IOException { + return CONTAINERS.getTable(store); + } + + protected ContainerWithPipeline getTestContainer(LifeCycleState state) + throws IOException { + ContainerID containerID = new ContainerID(100L); + Pipeline pipeline = getRandomPipeline(); + pipelineManager.addPipeline(pipeline); + ContainerInfo containerInfo = + new ContainerInfo.Builder() + .setContainerID(containerID.getId()) + .setNumberOfKeys(10) + .setPipelineID(pipeline.getId()) + .setReplicationFactor(ONE) + .setOwner("test") + .setState(state) + .setReplicationType(STAND_ALONE) + .build(); + return new ContainerWithPipeline(containerInfo, pipeline); + } + + protected ContainerWithPipeline getTestContainer(long id, + LifeCycleState state) + throws IOException { + ContainerID containerID = new ContainerID(id); + Pipeline pipeline = getRandomPipeline(); + pipelineManager.addPipeline(pipeline); + ContainerInfo containerInfo = + new ContainerInfo.Builder() + .setContainerID(containerID.getId()) + .setNumberOfKeys(10) + .setPipelineID(pipeline.getId()) + .setReplicationFactor(ONE) + .setOwner("test") + .setState(state) + .setReplicationType(STAND_ALONE) + .build(); + return new ContainerWithPipeline(containerInfo, pipeline); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java index ccc1c80569a6..9f47779e3b33 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java @@ -19,10 +19,9 @@ package org.apache.hadoop.ozone.recon.scm; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; -import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -32,10 +31,11 @@ import java.util.NavigableSet; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.junit.Test; /** @@ -45,39 +45,57 @@ public class TestReconContainerManager extends AbstractReconContainerManagerTest { @Test - public void testAddNewContainer() throws IOException { - ContainerID containerID = new ContainerID(100L); - Pipeline pipeline = getRandomPipeline(); - ReconPipelineManager pipelineManager = getPipelineManager(); - pipelineManager.addPipeline(pipeline); - ContainerInfo containerInfo = - new ContainerInfo.Builder() - .setContainerID(containerID.getId()) - .setNumberOfKeys(10) - .setPipelineID(pipeline.getId()) - .setReplicationFactor(ONE) - .setOwner("test") - .setState(OPEN) - .setReplicationType(STAND_ALONE) - .build(); + public void testAddNewOpenContainer() throws IOException { ContainerWithPipeline containerWithPipeline = - new ContainerWithPipeline(containerInfo, pipeline); + getTestContainer(LifeCycleState.OPEN); + ContainerID containerID = + containerWithPipeline.getContainerInfo().containerID(); + ContainerInfo containerInfo = containerWithPipeline.getContainerInfo(); ReconContainerManager containerManager = getContainerManager(); assertFalse(containerManager.exists(containerID)); + assertFalse(getContainerTable().isExist(containerID)); containerManager.addNewContainer( containerID.getId(), containerWithPipeline); assertTrue(containerManager.exists(containerID)); - List containers = containerManager.getContainers(OPEN); + List containers = + containerManager.getContainers(LifeCycleState.OPEN); assertEquals(1, containers.size()); assertEquals(containerInfo, containers.get(0)); NavigableSet containersInPipeline = - pipelineManager.getContainersInPipeline(pipeline.getId()); + getPipelineManager().getContainersInPipeline( + containerWithPipeline.getPipeline().getId()); assertEquals(1, containersInPipeline.size()); assertEquals(containerID, containersInPipeline.first()); + + // Verify container DB. 
+ assertTrue(getContainerTable().isExist(containerID)); + } + + @Test + public void testAddNewClosedContainer() throws IOException { + ContainerWithPipeline containerWithPipeline = getTestContainer(CLOSED); + ContainerID containerID = + containerWithPipeline.getContainerInfo().containerID(); + ContainerInfo containerInfo = containerWithPipeline.getContainerInfo(); + + ReconContainerManager containerManager = getContainerManager(); + assertFalse(containerManager.exists(containerID)); + assertFalse(getContainerTable().isExist(containerID)); + + containerManager.addNewContainer( + containerID.getId(), containerWithPipeline); + + assertTrue(containerManager.exists(containerID)); + + List containers = containerManager.getContainers(CLOSED); + assertEquals(1, containers.size()); + assertEquals(containerInfo, containers.get(0)); + // Verify container DB. + assertTrue(getContainerTable().isExist(containerID)); } @Test @@ -86,12 +104,39 @@ public void testCheckAndAddNewContainer() throws IOException { ReconContainerManager containerManager = getContainerManager(); assertFalse(containerManager.exists(containerID)); DatanodeDetails datanodeDetails = randomDatanodeDetails(); - containerManager.checkAndAddNewContainer(containerID, datanodeDetails); + containerManager.checkAndAddNewContainer(containerID, + OPEN, datanodeDetails); assertTrue(containerManager.exists(containerID)); // Doing it one more time should not change any state. - containerManager.checkAndAddNewContainer(containerID, datanodeDetails); + containerManager.checkAndAddNewContainer(containerID, OPEN, + datanodeDetails); assertTrue(containerManager.exists(containerID)); + assertEquals(LifeCycleState.OPEN, + getContainerManager().getContainer(containerID).getState()); } + @Test + public void testUpdateContainerStateFromOpen() throws IOException { + ContainerWithPipeline containerWithPipeline = + getTestContainer(LifeCycleState.OPEN); + + long id = containerWithPipeline.getContainerInfo().getContainerID(); + ContainerID containerID = + containerWithPipeline.getContainerInfo().containerID(); + + // Adding container #100. + getContainerManager().addNewContainer(id, containerWithPipeline); + assertEquals(LifeCycleState.OPEN, + getContainerManager().getContainer(containerID).getState()); + + DatanodeDetails datanodeDetails = randomDatanodeDetails(); + + // First report with "CLOSED" replica state moves container state to + // "CLOSING". 
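As the comment above notes, the call that follows should move an OPEN container to CLOSING when the first CLOSED replica is reported. A compact sketch of that single transition, with simplified enums standing in for the HDDS protobuf types (it mirrors only what this test asserts, not the full SCM state machine):

// Simplified stand-ins for the HDDS replica / container state enums.
enum ReplicaState { OPEN, CLOSING, QUASI_CLOSED, CLOSED }
enum ContainerState { OPEN, CLOSING, QUASI_CLOSED, CLOSED }

final class ContainerStateStep {

  private ContainerStateStep() {
  }

  // Only the transition asserted in this test: a CLOSED replica reported
  // for an OPEN container starts the close (OPEN -> CLOSING) rather than
  // jumping straight to CLOSED. Other Recon code paths may differ.
  static ContainerState onReplicaReport(ContainerState current,
      ReplicaState replica) {
    if (current != ContainerState.OPEN) {
      return current;
    }
    switch (replica) {
    case CLOSING:
      return ContainerState.CLOSING;
    case QUASI_CLOSED:
      return ContainerState.QUASI_CLOSED;
    case CLOSED:
      return ContainerState.CLOSING;
    default:
      return ContainerState.OPEN;
    }
  }

  public static void main(String[] args) {
    System.out.println(
        onReplicaReport(ContainerState.OPEN, ReplicaState.CLOSED)); // CLOSING
  }
}

The incremental-report handler test further down exercises the CLOSING and QUASI_CLOSED replica states and maps them one-to-one via getContainerStateFromReplicaState.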
+ getContainerManager().checkAndAddNewContainer(containerID, State.CLOSED, + datanodeDetails); + assertEquals(CLOSING, + getContainerManager().getContainer(containerID).getState()); + } } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java index d4f28c01fec5..1b42f21712de 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java @@ -19,24 +19,38 @@ package org.apache.hadoop.ozone.recon.scm; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.UUID; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; /** @@ -59,19 +73,86 @@ public void testProcessICR() throws IOException, NodeNotFoundException { datanodeDetails.getUuidString()); when(reportMock.getReport()).thenReturn(containerReport); - NodeManager nodeManagerMock = mock(NodeManager.class); + final String path = + GenericTestUtils.getTempPath(UUID.randomUUID().toString()); + Path scmPath = Paths.get(path, "scm-meta"); + final OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); + NetworkTopology clusterMap = new NetworkTopologyImpl(conf); + EventQueue eventQueue = new EventQueue(); + SCMStorageConfig storageConfig = new SCMStorageConfig(conf); + NodeManager nodeManager = + new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap); + 
nodeManager.register(datanodeDetails, null, null); ReconContainerManager containerManager = getContainerManager(); ReconIncrementalContainerReportHandler reconIcr = - new ReconIncrementalContainerReportHandler(nodeManagerMock, + new ReconIncrementalContainerReportHandler(nodeManager, containerManager); EventPublisher eventPublisherMock = mock(EventPublisher.class); reconIcr.onMessage(reportMock, eventPublisherMock); - verify(nodeManagerMock, times(1)) - .addContainer(datanodeDetails, containerID); + nodeManager.addContainer(datanodeDetails, containerID); assertTrue(containerManager.exists(containerID)); assertEquals(1, containerManager.getContainerReplicas(containerID).size()); + assertEquals(OPEN, containerManager.getContainer(containerID).getState()); + } + + @Test + public void testProcessICRStateMismatch() throws IOException { + + // Recon container state is "OPEN". + // Replica state could be any Non OPEN state. + long containerId = 11; + for (State state : Arrays.asList(State.CLOSING, State.QUASI_CLOSED, + State.CLOSED)) { + ContainerWithPipeline containerWithPipeline = getTestContainer( + containerId++, OPEN); + ContainerID containerID = + containerWithPipeline.getContainerInfo().containerID(); + + ReconContainerManager containerManager = getContainerManager(); + containerManager.addNewContainer(containerID.getId(), + containerWithPipeline); + + DatanodeDetails datanodeDetails = + containerWithPipeline.getPipeline().getFirstNode(); + NodeManager nodeManagerMock = mock(NodeManager.class); + when(nodeManagerMock.getNodeByUuid(any())).thenReturn(datanodeDetails); + IncrementalContainerReportFromDatanode reportMock = + mock(IncrementalContainerReportFromDatanode.class); + when(reportMock.getDatanodeDetails()) + .thenReturn(containerWithPipeline.getPipeline().getFirstNode()); + + IncrementalContainerReportProto containerReport = + getIncrementalContainerReportProto(containerID, state, + datanodeDetails.getUuidString()); + when(reportMock.getReport()).thenReturn(containerReport); + ReconIncrementalContainerReportHandler reconIcr = + new ReconIncrementalContainerReportHandler(nodeManagerMock, + containerManager); + + reconIcr.onMessage(reportMock, mock(EventPublisher.class)); + assertTrue(containerManager.exists(containerID)); + assertEquals(1, + containerManager.getContainerReplicas(containerID).size()); + LifeCycleState expectedState = getContainerStateFromReplicaState(state); + LifeCycleState actualState = + containerManager.getContainer(containerID).getState(); + assertEquals(String.format("Expecting %s in " + + "container state for replica state %s", expectedState, + state), expectedState, actualState); + } + } + + private LifeCycleState getContainerStateFromReplicaState( + State state) { + switch (state) { + case CLOSING: return LifeCycleState.CLOSING; + case QUASI_CLOSED: return LifeCycleState.QUASI_CLOSED; + case CLOSED: return LifeCycleState.CLOSED; + default: return null; + } } private static IncrementalContainerReportProto diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java index 19290b16f135..c934caef22e5 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java @@ -26,12 +26,17 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.UUID; import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -46,6 +51,7 @@ public class TestReconNodeManager { public TemporaryFolder temporaryFolder = new TemporaryFolder(); private OzoneConfiguration conf; + private DBStore store; @Before public void setUp() throws Exception { @@ -53,6 +59,12 @@ public void setUp() throws Exception { conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().getAbsolutePath()); conf.set(OZONE_SCM_NAMES, "localhost"); + store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + } + + @After + public void tearDown() throws Exception { + store.close(); } @Test @@ -60,8 +72,10 @@ public void testReconNodeDB() throws IOException { ReconStorageConfig scmStorageConfig = new ReconStorageConfig(conf); EventQueue eventQueue = new EventQueue(); NetworkTopology clusterMap = new NetworkTopologyImpl(conf); + Table nodeTable = + ReconSCMDBDefinition.NODES.getTable(store); ReconNodeManager reconNodeManager = new ReconNodeManager(conf, - scmStorageConfig, eventQueue, clusterMap); + scmStorageConfig, eventQueue, clusterMap, nodeTable); ReconNewNodeHandler reconNewNodeHandler = new ReconNewNodeHandler(reconNodeManager); assertTrue(reconNodeManager.getAllNodes().isEmpty()); @@ -80,8 +94,8 @@ public void testReconNodeDB() throws IOException { // Close the DB, and recreate the instance of Recon Node Manager. eventQueue.close(); reconNodeManager.close(); - reconNodeManager = new ReconNodeManager(conf, - scmStorageConfig, eventQueue, clusterMap); + reconNodeManager = new ReconNodeManager(conf, scmStorageConfig, eventQueue, + clusterMap, nodeTable); // Verify that the node information was persisted and loaded back. 
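The assertion that follows checks exactly this behaviour: a node registered before the "restart" is visible again once a new ReconNodeManager is built over the same node table. A minimal sketch of the write-through-and-reload pattern under that assumption, with a plain Map standing in for the RocksDB-backed table (NodeRegistry is a hypothetical name):

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

class NodeRegistry {
  private final Map<UUID, String> nodeTable;           // persistent stand-in
  private final Map<UUID, String> inMemory = new HashMap<>();

  NodeRegistry(Map<UUID, String> nodeTable) {
    this.nodeTable = nodeTable;
    this.inMemory.putAll(nodeTable);                    // reload on startup
  }

  void register(UUID uuid, String hostName) {
    inMemory.put(uuid, hostName);
    nodeTable.put(uuid, hostName);                      // write-through
  }

  int nodeCount() {
    return inMemory.size();
  }

  public static void main(String[] args) {
    Map<UUID, String> table = new HashMap<>();
    NodeRegistry first = new NodeRegistry(table);
    first.register(UUID.randomUUID(), "host1.datanode");

    // "Restart": a new registry over the same table sees the node again.
    NodeRegistry reloaded = new NodeRegistry(table);
    System.out.println(reloaded.nodeCount());           // 1
  }
}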
assertEquals(1, reconNodeManager.getAllNodes().size()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java index c891f3321725..b190810db460 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java @@ -73,7 +73,7 @@ public void setup() throws IOException { temporaryFolder.newFolder().getAbsolutePath()); conf.set(OZONE_SCM_NAMES, "localhost"); scmStorageConfig = new ReconStorageConfig(conf); - store = DBStoreBuilder.createDBStore(conf, new ReconDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); } @After @@ -114,7 +114,7 @@ public void testInitialize() throws IOException { try (ReconPipelineManager reconPipelineManager = new ReconPipelineManager(conf, nodeManager, - ReconDBDefinition.PIPELINES.getTable(store), eventQueue)) { + ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue)) { reconPipelineManager.addPipeline(validPipeline); reconPipelineManager.addPipeline(invalidPipeline); @@ -150,7 +150,7 @@ public void testAddPipeline() throws IOException { ReconPipelineManager reconPipelineManager = new ReconPipelineManager(conf, nodeManager, - ReconDBDefinition.PIPELINES.getTable(store), eventQueue); + ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue); assertFalse(reconPipelineManager.containsPipeline(pipeline.getId())); reconPipelineManager.addPipeline(pipeline); assertTrue(reconPipelineManager.containsPipeline(pipeline.getId())); @@ -162,7 +162,7 @@ public void testStubbedReconPipelineFactory() throws IOException { NodeManager nodeManagerMock = mock(NodeManager.class); ReconPipelineManager reconPipelineManager = new ReconPipelineManager( - conf, nodeManagerMock, ReconDBDefinition.PIPELINES.getTable(store), + conf, nodeManagerMock, ReconSCMDBDefinition.PIPELINES.getTable(store), new EventQueue()); PipelineFactory pipelineFactory = reconPipelineManager.getPipelineFactory(); assertTrue(pipelineFactory instanceof ReconPipelineFactory); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java index d1cb1e96ae97..92c797be7fc2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,10 +35,12 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.codec.OMDBDefinition; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -54,6 +56,8 @@ public class TestOMDBUpdatesHandler { @Rule public TemporaryFolder folder = new TemporaryFolder(); + private OMDBDefinition omdbDefinition = new OMDBDefinition(); + private OzoneConfiguration createNewTestPath() throws IOException { OzoneConfiguration configuration = new OzoneConfiguration(); File newFolder = folder.newFolder(); @@ -149,6 +153,7 @@ public void testDelete() throws Exception { // Write 1 volume, 1 key into source and target OM DBs. String volumeKey = metaMgr.getVolumeKey("sampleVol"); + String nonExistVolumeKey = metaMgr.getVolumeKey("nonExistingVolume"); OmVolumeArgs args = OmVolumeArgs.newBuilder() .setVolume("sampleVol") @@ -165,6 +170,9 @@ public void testDelete() throws Exception { // Delete the volume and key from target DB. metaMgr.getKeyTable().delete("/sampleVol/bucketOne/key_one"); metaMgr.getVolumeTable().delete(volumeKey); + // Delete a non-existing volume and key + metaMgr.getKeyTable().delete("/sampleVol/bucketOne/key_two"); + metaMgr.getVolumeTable().delete(metaMgr.getVolumeKey("nonExistingVolume")); RDBStore rdbStore = (RDBStore) metaMgr.getStore(); RocksDB rocksDB = rdbStore.getDb(); @@ -191,7 +199,7 @@ public void testDelete() throws Exception { } List events = omdbUpdatesHandler.getEvents(); - assertTrue(events.size() == 2); + assertEquals(4, events.size()); OMDBUpdateEvent keyEvent = events.get(0); assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, keyEvent.getAction()); @@ -201,7 +209,35 @@ public void testDelete() throws Exception { OMDBUpdateEvent volEvent = events.get(1); assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, volEvent.getAction()); assertEquals(volumeKey, volEvent.getKey()); - assertNull(volEvent.getValue()); + assertNotNull(volEvent.getValue()); + OmVolumeArgs volumeInfo = (OmVolumeArgs) volEvent.getValue(); + assertEquals("sampleVol", volumeInfo.getVolume()); + + // Assert the values of non existent keys are set to null. 
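The two assertions that follow depend on the updated delete handling: a DELETE event now carries the value that was removed, and a null value marks a delete of a key that never existed. A sketch of how that value can be captured at event-generation time, assuming a simple map-backed table (EventLog and Event are hypothetical names, not the OMDBUpdatesHandler API):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class EventLog {
  static final class Event {
    final String key;
    final Object value;   // null when the key did not exist

    Event(String key, Object value) {
      this.key = key;
      this.value = value;
    }
  }

  private final Map<String, Object> table = new HashMap<>();
  private final List<Event> events = new ArrayList<>();

  void put(String key, Object value) {
    table.put(key, value);
  }

  void delete(String key) {
    // Look the key up before applying the delete, so the event carries
    // the old value, or null if nothing was actually removed.
    events.add(new Event(key, table.remove(key)));
  }

  public static void main(String[] args) {
    EventLog log = new EventLog();
    log.put("/sampleVol", "volumeInfo");
    log.delete("/sampleVol");          // value present -> non-null in event
    log.delete("/nonExistingVolume");  // never written  -> null in event
    System.out.println(
        log.events.get(0).value + " / " + log.events.get(1).value);
    // volumeInfo / null
  }
}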
+ OMDBUpdateEvent nonExistKey = events.get(2); + assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, + nonExistKey.getAction()); + assertEquals("/sampleVol/bucketOne/key_two", nonExistKey.getKey()); + assertNull(nonExistKey.getValue()); + + OMDBUpdateEvent nonExistVolume = events.get(3); + assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, + nonExistVolume.getAction()); + assertEquals(nonExistVolumeKey, nonExistVolume.getKey()); + assertNull(nonExistVolume.getValue()); + } + + @Test + public void testGetKeyType() throws IOException { + OzoneConfiguration configuration = createNewTestPath(); + OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); + OMDBUpdatesHandler omdbUpdatesHandler = + new OMDBUpdatesHandler(metaMgr); + + assertEquals(String.class, omdbDefinition.getKeyType( + metaMgr.getKeyTable().getName()).get()); + assertEquals(OzoneTokenIdentifier.class, omdbDefinition.getKeyType( + metaMgr.getDelegationTokenTable().getName()).get()); } @Test @@ -211,12 +247,12 @@ public void testGetValueType() throws IOException { OMDBUpdatesHandler omdbUpdatesHandler = new OMDBUpdatesHandler(metaMgr); - assertEquals(OmKeyInfo.class, omdbUpdatesHandler.getValueType( - metaMgr.getKeyTable().getName())); - assertEquals(OmVolumeArgs.class, omdbUpdatesHandler.getValueType( - metaMgr.getVolumeTable().getName())); - assertEquals(OmBucketInfo.class, omdbUpdatesHandler.getValueType( - metaMgr.getBucketTable().getName())); + assertEquals(OmKeyInfo.class, omdbDefinition.getValueType( + metaMgr.getKeyTable().getName()).get()); + assertEquals(OmVolumeArgs.class, omdbDefinition.getValueType( + metaMgr.getVolumeTable().getName()).get()); + assertEquals(OmBucketInfo.class, omdbDefinition.getValueType( + metaMgr.getBucketTable().getName()).get()); } private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java index ad03e67a81da..7d1323bacf60 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java @@ -131,7 +131,7 @@ public void testFailedTaskRetryLogic() throws Exception { } @Test - public void testBadBehavedTaskBlacklisting() throws Exception { + public void testBadBehavedTaskIsIgnored() throws Exception { String taskName = "Dummy_" + System.currentTimeMillis(); DummyReconDBTask dummyReconDBTask = new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.ALWAYS_FAIL); @@ -151,7 +151,7 @@ public void testBadBehavedTaskBlacklisting() throws Exception { .get(dummyReconDBTask.getTaskName())); } - //Should be blacklisted now. + //Should be ignored now. 
reconTaskController.consumeOMEvents(omUpdateEventBatchMock, omMetadataManagerMock); assertTrue(reconTaskController.getRegisteredTasks().isEmpty()); @@ -212,4 +212,4 @@ private ReconOmTask getMockTask(String taskName) { .thenReturn(Collections.singleton("MockTable")); return reconOmTaskMock; } -} \ No newline at end of file +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java new file mode 100644 index 000000000000..94d76731e2fb --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.TypedTable; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; +import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; +import org.jooq.DSLContext; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.util.ArrayList; + +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; +import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; +import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; +import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Unit test for Object Count Task. 
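The tests below exercise the counting rule that the TableCountTask is expected to apply to OM update events: a PUT increments the per-table count, a DELETE decrements it only when the event carries a non-null value (a null value means the key was never present, so there is nothing to subtract), and an UPDATE leaves the count unchanged. The following is a minimal illustrative sketch of that rule only; the in-memory map, the hypothetical class name TableCountRuleSketch and the simplified event parameters stand in for the real GlobalStatsDao-backed implementation, which is not part of this patch.

```java
import java.util.HashMap;
import java.util.Map;

/** Illustrative sketch of the per-table counting rule; not the real task. */
public final class TableCountRuleSketch {

  /** Simplified stand-in for OMDBUpdateEvent.OMDBUpdateAction. */
  public enum Action { PUT, DELETE, UPDATE }

  private final Map<String, Long> countPerTable = new HashMap<>();

  /**
   * Applies one event to the running count of the given table.
   *
   * @param table  table the event belongs to
   * @param value  value carried by the event; null for a DELETE of a key
   *               that did not exist
   * @param action PUT, DELETE or UPDATE
   */
  public void apply(String table, Object value, Action action) {
    long count = countPerTable.getOrDefault(table, 0L);
    switch (action) {
    case PUT:
      count++;
      break;
    case DELETE:
      // Only decrement when the deleted key actually existed, i.e. the
      // event carries its previous value.
      if (value != null) {
        count--;
      }
      break;
    default:
      // UPDATE replaces a value in place; the row count is unchanged.
      break;
    }
    countPerTable.put(table, count);
  }

  public long getCount(String table) {
    return countPerTable.getOrDefault(table, 0L);
  }
}
```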
+ */ +public class TestTableCountTask extends AbstractReconSqlDBTest { + + private GlobalStatsDao globalStatsDao; + private TableCountTask tableCountTask; + private DSLContext dslContext; + private boolean isSetupDone = false; + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + private void initializeInjector() throws IOException { + ReconOMMetadataManager omMetadataManager = getTestReconOmMetadataManager( + initializeNewOmMetadataManager(temporaryFolder.newFolder()), + temporaryFolder.newFolder()); + globalStatsDao = getDao(GlobalStatsDao.class); + tableCountTask = new TableCountTask(globalStatsDao, getConfiguration(), + omMetadataManager); + dslContext = getDslContext(); + } + + @Before + public void setUp() throws IOException { + // The following setup runs only once + if (!isSetupDone) { + initializeInjector(); + isSetupDone = true; + } + // Truncate table before running each test + dslContext.truncate(GLOBAL_STATS); + } + + @Test + public void testReprocess() { + OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class); + // Mock 5 rows in each table and test the count + for (String tableName: tableCountTask.getTaskTables()) { + TypedTable table = mock(TypedTable.class); + TypedTable.TypedTableIterator mockIter = mock(TypedTable + .TypedTableIterator.class); + when(table.iterator()).thenReturn(mockIter); + when(omMetadataManager.getTable(tableName)).thenReturn(table); + when(mockIter.hasNext()) + .thenReturn(true) + .thenReturn(true) + .thenReturn(true) + .thenReturn(true) + .thenReturn(true) + .thenReturn(false); + } + + Pair result = tableCountTask.reprocess(omMetadataManager); + assertTrue(result.getRight()); + + assertEquals(5L, getCountForTable(KEY_TABLE)); + assertEquals(5L, getCountForTable(VOLUME_TABLE)); + assertEquals(5L, getCountForTable(BUCKET_TABLE)); + assertEquals(5L, getCountForTable(OPEN_KEY_TABLE)); + assertEquals(5L, getCountForTable(DELETED_TABLE)); + } + + @Test + public void testProcess() { + ArrayList events = new ArrayList<>(); + // Create 5 put, 1 delete and 1 update event for each table + for (String tableName: tableCountTask.getTaskTables()) { + for (int i=0; i<5; i++) { + events.add(getOMUpdateEvent("item" + i, null, tableName, PUT)); + } + // for delete event, if value is set to null, the counter will not be + // decremented. This is because the value will be null if item does not + // exist in the database and there is no need to delete. + events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, + DELETE)); + events.add(getOMUpdateEvent("item1", null, tableName, UPDATE)); + } + OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events); + tableCountTask.process(omUpdateEventBatch); + + // Verify 4 items in each table. 
(5 puts - 1 delete + 0 update) + assertEquals(4L, getCountForTable(KEY_TABLE)); + assertEquals(4L, getCountForTable(VOLUME_TABLE)); + assertEquals(4L, getCountForTable(BUCKET_TABLE)); + assertEquals(4L, getCountForTable(OPEN_KEY_TABLE)); + assertEquals(4L, getCountForTable(DELETED_TABLE)); + + // add a new key and simulate delete on non-existing item (value: null) + ArrayList newEvents = new ArrayList<>(); + for (String tableName: tableCountTask.getTaskTables()) { + newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT)); + // This delete event should be a noop since value is null + newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE)); + } + + omUpdateEventBatch = new OMUpdateEventBatch(newEvents); + tableCountTask.process(omUpdateEventBatch); + + // Verify 5 items in each table. (1 new put + 0 delete) + assertEquals(5L, getCountForTable(KEY_TABLE)); + assertEquals(5L, getCountForTable(VOLUME_TABLE)); + assertEquals(5L, getCountForTable(BUCKET_TABLE)); + assertEquals(5L, getCountForTable(OPEN_KEY_TABLE)); + assertEquals(5L, getCountForTable(DELETED_TABLE)); + } + + private OMDBUpdateEvent getOMUpdateEvent(String name, Object value, + String table, + OMDBUpdateEvent.OMDBUpdateAction action) { + return new OMUpdateEventBuilder() + .setAction(action) + .setKey(name) + .setValue(value) + .setTable(table) + .build(); + } + + private long getCountForTable(String tableName) { + String key = TableCountTask.getRowKeyFromTable(tableName); + return globalStatsDao.findById(key).getValue(); + } +} diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 80bd34fa270d..4a62fc72dceb 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -19,12 +19,12 @@ org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-s3gateway Apache Hadoop Ozone S3 Gateway jar - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT UTF-8 true @@ -163,6 +163,11 @@ org.apache.hadoop hadoop-ozone-client + + org.apache.hadoop + hadoop-hdds-docs + provided + junit junit diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSSignatureProcessor.java similarity index 91% rename from hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java rename to hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSSignatureProcessor.java index 099221daa380..0cb82fb77b9e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4SignatureProcessor.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSSignatureProcessor.java @@ -43,6 +43,7 @@ import java.util.regex.Pattern; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV2; import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV4; import org.apache.hadoop.ozone.s3.header.Credential; @@ -54,15 +55,15 @@ import org.slf4j.LoggerFactory; /** - * Parser to process AWS v4 auth request. Creates string to sign and auth + * Parser to process AWS V2 & V4 auth request. Creates string to sign and auth * header. For more details refer to AWS documentation https://docs.aws * .amazon.com/general/latest/gr/sigv4-create-canonical-request.html. 
**/ @RequestScoped -public class AWSV4SignatureProcessor implements SignatureProcessor { +public class AWSSignatureProcessor implements SignatureProcessor { private final static Logger LOG = - LoggerFactory.getLogger(AWSV4SignatureProcessor.class); + LoggerFactory.getLogger(AWSSignatureProcessor.class); @Context private ContainerRequestContext context; @@ -72,13 +73,12 @@ public class AWSV4SignatureProcessor implements SignatureProcessor { private String uri; private String method; private AuthorizationHeaderV4 v4Header; + private AuthorizationHeaderV2 v2Header; private String stringToSign; @PostConstruct public void init() throws Exception { - LOG.info("Initializing request header parser"); - //header map is MUTABLE. It's better to save it here. (with lower case // keys!!!) this.headers = new LowerCaseKeyStringMap(new HashMap<>()); @@ -104,19 +104,21 @@ public void init() this.queryMap = context.getUriInfo().getQueryParameters(); - try { - this.uri = new URI(context.getUriInfo().getRequestUri() - .getPath().replaceAll("\\/+", - "/")).normalize().getPath(); - } catch (URISyntaxException e) { - throw S3_AUTHINFO_CREATION_ERROR; - } + this.uri = context.getUriInfo().getRequestUri().getPath(); this.method = context.getMethod(); - if (v4Header == null) { - v4Header = new AuthorizationHeaderV4(headers.get(AUTHORIZATION_HEADER)); + String authHeader = headers.get(AUTHORIZATION_HEADER); + String[] split = authHeader.split(" "); + if (split[0].equals(AuthorizationHeaderV2.IDENTIFIER)) { + if (v2Header == null) { + v2Header = new AuthorizationHeaderV2(authHeader); + } + } else { + if (v4Header == null) { + v4Header = new AuthorizationHeaderV4(authHeader); + } + parse(); } - parse(); } @@ -326,11 +328,13 @@ public static String hash(String payload) throws NoSuchAlgorithmException { } public String getAwsAccessId() { - return v4Header.getAccessKeyID(); + return (v4Header != null ? v4Header.getAccessKeyID() : + v2Header != null ? v2Header.getAccessKeyID() : ""); } public String getSignature() { - return v4Header.getSignature(); + return (v4Header != null ? v4Header.getSignature() : + v2Header != null ? v2Header.getSignature() : ""); } public String getStringToSign() throws Exception { @@ -348,6 +352,11 @@ public void setV4Header( this.v4Header = v4Header; } + @VisibleForTesting + public void setV2Header(AuthorizationHeaderV2 v2Header) { + this.v2Header = v2Header; + } + /** * A simple map which forces lower case key usage. 
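For context on the dispatch added to init() above: a Signature V2 Authorization header starts with the literal token AWS, while a V4 header starts with the signing algorithm name, so inspecting the first space-separated token is enough to pick the parser. The header values and the class name in this sketch are made up purely to show the two shapes; only the leading token matters.

```java
/** Illustrative only: the two Authorization header shapes and the dispatch. */
public final class AuthHeaderShapesSketch {
  public static void main(String[] args) {
    // Signature V2: "AWS <AccessKeyId>:<Base64 HMAC-SHA1 signature>"
    String v2 = "AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg=";

    // Signature V4: "AWS4-HMAC-SHA256 Credential=..., SignedHeaders=..., Signature=..."
    String v4 = "AWS4-HMAC-SHA256 "
        + "Credential=AKIAIOSFODNN7EXAMPLE/20200101/us-east-1/s3/aws4_request, "
        + "SignedHeaders=host;x-amz-date, "
        + "Signature=abcdef0123456789";

    for (String header : new String[] {v2, v4}) {
      String scheme = header.split(" ")[0];
      System.out.println(scheme.equals("AWS") ? "parse as V2" : "parse as V4");
    }
  }
}
```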
*/ diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java index 3cd7b7c5dc02..a3042c13a3cf 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java @@ -51,7 +51,7 @@ public class OzoneClientProducer { private OzoneClient client; @Inject - private SignatureProcessor v4RequestParser; + private SignatureProcessor signatureParser; @Inject private OzoneConfiguration ozoneConfiguration; @@ -76,7 +76,7 @@ public void destory() throws IOException { private OzoneClient getClient(OzoneConfiguration config) throws IOException { try { - String awsAccessId = v4RequestParser.getAwsAccessId(); + String awsAccessId = signatureParser.getAwsAccessId(); UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(awsAccessId); if (OzoneSecurityUtil.isSecurityEnabled(config)) { @@ -85,8 +85,8 @@ private OzoneClient getClient(OzoneConfiguration config) throws IOException { OzoneTokenIdentifier identifier = new OzoneTokenIdentifier(); identifier.setTokenType(S3AUTHINFO); - identifier.setStrToSign(v4RequestParser.getStringToSign()); - identifier.setSignature(v4RequestParser.getSignature()); + identifier.setStrToSign(signatureParser.getStringToSign()); + identifier.setSignature(signatureParser.getSignature()); identifier.setAwsAccessId(awsAccessId); identifier.setOwner(new Text(awsAccessId)); if (LOG.isTraceEnabled()) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java index fae1c823ca7e..5acf36876146 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java @@ -52,6 +52,12 @@ public final class S3GatewayConfigKeys { OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX + "kerberos.keytab"; public static final String OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL = OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX + "kerberos.principal"; + + public static final String OZONE_S3G_CLIENT_BUFFER_SIZE_KEY = + "ozone.s3g.client.buffer.size"; + public static final String OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT = + "4KB"; + /** * Never constructed. */ diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index ef02510704da..067d6a447c04 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -31,7 +31,6 @@ import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import javax.ws.rs.core.Response.Status; import java.io.IOException; import java.io.InputStream; import java.util.Iterator; @@ -253,12 +252,7 @@ public Response head(@PathParam("bucket") String bucketName) getBucket(bucketName); } catch (OS3Exception ex) { LOG.error("Exception occurred in headBucket", ex); - //TODO: use a subclass fo OS3Exception and catch it here. 
- if (ex.getCode().contains("NoSuchBucket")) { - return Response.status(Status.BAD_REQUEST).build(); - } else { - throw ex; - } + throw ex; } return Response.ok().build(); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 6f0ea57e8623..f6655602d7d0 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.ozone.s3.endpoint; +import javax.annotation.PostConstruct; +import javax.inject.Inject; import javax.ws.rs.Consumes; import javax.ws.rs.DELETE; import javax.ws.rs.DefaultValue; @@ -36,6 +38,7 @@ import javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.StreamingOutput; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.time.Instant; @@ -49,6 +52,8 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -77,6 +82,9 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.tuple.Pair; + +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_KEY; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD; @@ -104,6 +112,7 @@ public class ObjectEndpoint extends EndpointBase { private HttpHeaders headers; private List customizableGetHeaders = new ArrayList<>(); + private int bufferSize; public ObjectEndpoint() { customizableGetHeaders.add("Content-Type"); @@ -114,6 +123,16 @@ public ObjectEndpoint() { customizableGetHeaders.add("Content-Encoding"); } + @Inject + private OzoneConfiguration ozoneConfiguration; + + @PostConstruct + public void init() { + bufferSize = (int) ozoneConfiguration.getStorageSize( + OZONE_S3G_CLIENT_BUFFER_SIZE_KEY, + OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT, StorageUnit.BYTES); + } + /** * Rest endpoint to upload object to a bucket. *
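The new ozone.s3g.client.buffer.size setting introduced above is resolved once in the @PostConstruct hook and then reused as the copy buffer for range reads. A small stand-alone sketch of that lookup, assuming only the key and its 4KB default from S3GatewayConfigKeys; the 8KB override and the class name are arbitrary example values, not part of the patch.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;

/** Illustrative lookup of the S3 gateway client buffer size. */
public final class BufferSizeLookupSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Example override; without it the "4KB" default applies.
    conf.set("ozone.s3g.client.buffer.size", "8KB");

    int bufferSize = (int) conf.getStorageSize(
        "ozone.s3g.client.buffer.size", "4KB", StorageUnit.BYTES);

    System.out.println("copy buffer size in bytes: " + bufferSize); // 8192
  }
}
```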

@@ -259,7 +278,9 @@ public Response get( try (S3WrapperInputStream s3WrapperInputStream = new S3WrapperInputStream( key.getInputStream())) { - s3WrapperInputStream.copyLarge(dest, startOffset, copyLength); + s3WrapperInputStream.seek(startOffset); + IOUtils.copyLarge(s3WrapperInputStream, dest, 0, + copyLength, new byte[bufferSize]); } }; responseBuilder = Response @@ -400,7 +421,6 @@ public Response delete( return Response .status(Status.NO_CONTENT) .build(); - } /** @@ -539,16 +559,15 @@ private Response createMultipartKey(String bucket, String key, long length, if (range != null) { RangeHeader rangeHeader = RangeHeaderParserUtil.parseRangeHeader(range, 0); - - long copyLength = rangeHeader.getEndOffset() - - rangeHeader.getStartOffset(); - - try (S3WrapperInputStream s3WrapperInputStream = - new S3WrapperInputStream( - sourceObject.getInputStream())) { - s3WrapperInputStream.copyLarge(ozoneOutputStream, - rangeHeader.getStartOffset(), copyLength); + final long skipped = + sourceObject.skip(rangeHeader.getStartOffset()); + if (skipped != rangeHeader.getStartOffset()) { + throw new EOFException( + "Bytes to skip: " + + rangeHeader.getStartOffset() + " actual: " + skipped); } + IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0, + rangeHeader.getEndOffset() - rangeHeader.getStartOffset()); } else { IOUtils.copy(sourceObject, ozoneOutputStream); } @@ -578,7 +597,6 @@ private Response createMultipartKey(String bucket, String key, long length, } throw ex; } - } /** diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java index dfafc3a5acad..fe096cee9ff5 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java @@ -29,7 +29,7 @@ */ public class AuthorizationHeaderV2 { - private final static String IDENTIFIER = "AWS"; + public final static String IDENTIFIER = "AWS"; private String authHeader; private String identifier; private String accessKeyID; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java index 0d9f092b8e36..1e48689a86c7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java @@ -35,8 +35,8 @@ import static org.apache.commons.lang3.StringUtils.isEmpty; import static org.apache.commons.lang3.StringUtils.isNotEmpty; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; -import static org.apache.hadoop.ozone.s3.AWSV4SignatureProcessor.AWS4_SIGNING_ALGORITHM; -import static org.apache.hadoop.ozone.s3.AWSV4SignatureProcessor.DATE_FORMATTER; +import static org.apache.hadoop.ozone.s3.AWSSignatureProcessor.AWS4_SIGNING_ALGORITHM; +import static org.apache.hadoop.ozone.s3.AWSSignatureProcessor.DATE_FORMATTER; /** * S3 Authorization header. 
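The ObjectEndpoint hunks above replace S3WrapperInputStream.copyLarge with an explicit position-then-bounded-copy, which is why the wrapper class in the next hunk can drop its copyLarge helper and only expose seek/skip. A stand-alone sketch of that pattern with plain JDK streams and commons-io; the data, offsets, buffer size and class name are arbitrary and illustrative only.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;

/** Illustrative range read: position the stream, then copy a bounded length. */
public final class RangeCopySketch {
  public static void main(String[] args) throws IOException {
    byte[] data = "0123456789abcdef".getBytes(StandardCharsets.UTF_8);
    InputStream in = new ByteArrayInputStream(data);
    ByteArrayOutputStream out = new ByteArrayOutputStream();

    long startOffset = 4;          // first byte of the requested range
    long copyLength = 6;           // number of bytes in the range
    byte[] buffer = new byte[4 * 1024];

    // Position the source at the start of the range; seekable Ozone streams
    // can seek(), a plain InputStream has to skip() and verify the result.
    long skipped = in.skip(startOffset);
    if (skipped != startOffset) {
      throw new EOFException(
          "Bytes to skip: " + startOffset + " actual: " + skipped);
    }

    // Copy exactly copyLength bytes through the reusable buffer.
    IOUtils.copyLarge(in, out, 0, copyLength, buffer);

    System.out.println(out.toString(StandardCharsets.UTF_8.name())); // 456789
  }
}
```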
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java index edf90edd9a3e..d88287c4edbb 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java @@ -23,14 +23,12 @@ import java.io.IOException; import java.io.InputStream; -import java.io.OutputStream; /** * S3Wrapper Input Stream which encapsulates KeyInputStream from ozone. */ public class S3WrapperInputStream extends FSInputStream { private final KeyInputStream inputStream; - private static final int DEFAULT_BUFFER_SIZE = 32 * 1024; /** * Constructs S3WrapperInputStream with KeyInputStream. @@ -75,36 +73,12 @@ public long getPos() throws IOException { } @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; + public long skip(long n) throws IOException { + return inputStream.skip(n); } - /** - * Copies some or all bytes from a large (over 2GB) InputStream - * to an OutputStream, optionally skipping input bytes. - *

- * Copy the method from IOUtils of commons-io to reimplement skip by seek - * rather than read. The reason why IOUtils of commons-io implement skip - * by read can be found at - * IO-203. - *

- *

- * This method buffers the input internally, so there is no need to use a - * BufferedInputStream. - *

- * The buffer size is given by {@link #DEFAULT_BUFFER_SIZE}. - * - * @param output the OutputStream to write to - * @param inputOffset : number of bytes to skip from input before copying - * -ve values are ignored - * @param length : number of bytes to copy. -ve means all - * @return the number of bytes copied - * @throws NullPointerException if the input or output is null - * @throws IOException if an I/O error occurs - */ - public long copyLarge(final OutputStream output, final long inputOffset, - final long length) throws IOException { - return inputStream.copyLarge(output, inputOffset, length, - new byte[DEFAULT_BUFFER_SIZE]); + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + return false; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java index 5f5c827433d2..a57a0f688e63 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java @@ -82,8 +82,8 @@ public boolean isInValidRange() { public String toString() { - return "startOffset - [" + startOffset + "]" + "endOffset - [" + - endOffset + "]" + " readFull - [ " + readFull + "]" + " invalidRange " + - "- [ " + inValidRange + "]"; + return "startOffset - [" + startOffset + "]" + ", endOffset - [" + + endOffset + "]" + ", readFull - [" + readFull + "]" + + ", invalidRange - [" + inValidRange + "]"; } } diff --git a/hadoop-ozone/s3gateway/src/main/resources/browser.html b/hadoop-ozone/s3gateway/src/main/resources/browser.html index 0405b17e90b5..4e6a00bfec9d 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/browser.html +++ b/hadoop-ozone/s3gateway/src/main/resources/browser.html @@ -138,7 +138,7 @@ - + diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html index b20bf3530da0..81158f1a1306 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html +++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html @@ -75,7 +75,7 @@

S3 gateway

- + diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSV4SignatureProcessor.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSSignatureProcessor.java similarity index 69% rename from hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSV4SignatureProcessor.java rename to hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSSignatureProcessor.java index 11b3b91724df..239e2857957b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSV4SignatureProcessor.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSSignatureProcessor.java @@ -24,6 +24,7 @@ import java.net.URI; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV2; import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV4; import org.junit.Assert; @@ -33,10 +34,10 @@ /** * Test the Auth parser. */ -public class TestAWSV4SignatureProcessor { +public class TestAWSSignatureProcessor { @Test - public void testInitialization() throws Exception { + public void testV4Initialization() throws Exception { MultivaluedMap headers = new MultivaluedHashMap<>(); headers.putSingle("Content-Length", "123"); @@ -76,7 +77,7 @@ public void validateDateRange() throws OS3Exception { Mockito.when(mock.getMethod()).thenReturn("GET"); Mockito.when(mock.getUriInfo()).thenReturn(uriInfo); - AWSV4SignatureProcessor parser = new AWSV4SignatureProcessor() { + AWSSignatureProcessor parser = new AWSSignatureProcessor() { @Override void validateSignedHeader(String header, String headerValue) throws OS3Exception { @@ -100,4 +101,41 @@ void validateSignedHeader(String header, String headerValue) "f20d4de80af2271545385e8d4c7df608cae70a791c69b97aab1527ed93a0d665", parser.getStringToSign()); } + + @Test + public void testV2Initialization() throws Exception { + + MultivaluedMap headers = new MultivaluedHashMap<>(); + String authHeader = "AWS root:ixWQAgWvJDuqLUqgDG9o4b2HF7c="; + headers.putSingle("Authorization", authHeader); + + AuthorizationHeaderV2 parserAuthHeader = + new AuthorizationHeaderV2(authHeader); + + MultivaluedMap queryParameters = new MultivaluedHashMap<>(); + + UriInfo uriInfo = Mockito.mock(UriInfo.class); + Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryParameters); + Mockito.when(uriInfo.getRequestUri()) + .thenReturn(new URI("http://localhost/buckets")); + + ContainerRequestContext mock = Mockito.mock(ContainerRequestContext.class); + Mockito.when(mock.getHeaders()).thenReturn(headers); + Mockito.when(mock.getMethod()).thenReturn("GET"); + Mockito.when(mock.getUriInfo()).thenReturn(uriInfo); + + AWSSignatureProcessor parser = new AWSSignatureProcessor() { + @Override + void validateSignedHeader(String header, String headerValue) + throws OS3Exception { + super.validateSignedHeader(header, headerValue); + } + }; + parser.setV2Header(parserAuthHeader); + parser.setContext(mock); + parser.init(); + + Assert.assertEquals("root", parser.getAwsAccessId()); + Assert.assertEquals("ixWQAgWvJDuqLUqgDG9o4b2HF7c=", parser.getSignature()); + } } \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java index 6f991e66fc86..18b4b2c7f8f9 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java @@ -26,7 +26,10 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.Assert; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static org.junit.Assert.assertEquals; import org.junit.Before; import org.junit.Test; @@ -60,7 +63,11 @@ public void testHeadBucket() throws Exception { @Test public void testHeadFail() throws Exception { - Response response = bucketEndpoint.head("unknownbucket"); - Assert.assertEquals(400, response.getStatus()); + try { + bucketEndpoint.head("unknownbucket"); + } catch (OS3Exception ex) { + Assert.assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + Assert.assertEquals("NoSuchBucket", ex.getCode()); + } } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java index 014cb3e5ae8a..b4a21e3ae583 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java @@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static org.apache.hadoop.ozone.s3.AWSV4SignatureProcessor.DATE_FORMATTER; +import static org.apache.hadoop.ozone.s3.AWSSignatureProcessor.DATE_FORMATTER; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java index e5d5562cd1a5..5ca1c4522414 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java @@ -26,7 +26,7 @@ import java.time.LocalDate; import static java.time.temporal.ChronoUnit.DAYS; -import static org.apache.hadoop.ozone.s3.AWSV4SignatureProcessor.DATE_FORMATTER; +import static org.apache.hadoop.ozone.s3.AWSSignatureProcessor.DATE_FORMATTER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 7fb083325ecc..0fbc7f1a477c 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT hadoop-ozone-tools - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Tools Apache Hadoop Ozone Tools jar @@ -63,6 +63,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-server-framework + + org.apache.hadoop + hadoop-ozone-recon + + + org.springframework + spring-jdbc + + + org.apache.hadoop hadoop-hdfs @@ -105,6 +115,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.amazonaws aws-java-sdk-s3 + + org.kohsuke.metainf-services + metainf-services + com.github.spotbugs spotbugs diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java deleted file mode 100644 index 81f8f649b694..000000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.admin; - -import java.io.IOException; - -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.MutableConfigurationSource; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; -import org.apache.hadoop.hdds.scm.cli.ReplicationManagerCommands; -import org.apache.hadoop.hdds.scm.cli.SafeModeCommands; -import org.apache.hadoop.hdds.scm.cli.TopologySubcommand; -import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands; -import org.apache.hadoop.hdds.scm.cli.container.WithScmClient; -import org.apache.hadoop.hdds.scm.cli.datanode.DatanodeCommands; -import org.apache.hadoop.hdds.scm.cli.pipeline.PipelineCommands; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.ozone.admin.om.OMAdmin; -import org.apache.hadoop.util.NativeCodeLoader; - -import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import picocli.CommandLine; -import picocli.CommandLine.Option; - -/** - * Ozone Admin Command line tool. - */ -@CommandLine.Command(name = "ozone admin", - hidden = true, - description = "Developer tools for Ozone Admin operations", - versionProvider = HddsVersionProvider.class, - subcommands = { - OMAdmin.class, - SafeModeCommands.class, - ContainerCommands.class, - PipelineCommands.class, - DatanodeCommands.class, - TopologySubcommand.class, - ReplicationManagerCommands.class - }, - mixinStandardHelpOptions = true) -public class OzoneAdmin extends GenericCli implements WithScmClient { - - private OzoneConfiguration ozoneConf; - - @Option(names = {"--scm"}, description = "The destination scm (host:port)") - private String scm = ""; - - public OzoneConfiguration getOzoneConf() { - if (ozoneConf == null) { - ozoneConf = createOzoneConfiguration(); - } - return ozoneConf; - } - - /** - * Main for the Ozone Admin shell Command handling. 
- * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { - LogManager.resetConfiguration(); - Logger.getRootLogger().setLevel(Level.INFO); - Logger.getRootLogger() - .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); - Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); - - new OzoneAdmin().run(argv); - } - - public ScmClient createScmClient() { - try { - OzoneConfiguration conf = createOzoneConfiguration(); - checkAndSetSCMAddressArg(conf); - - return new ContainerOperationClient(conf); - } catch (IOException ex) { - throw new IllegalArgumentException("Can't create SCM client", ex); - } - } - - private void checkAndSetSCMAddressArg(MutableConfigurationSource conf) { - if (StringUtils.isNotEmpty(scm)) { - conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm); - } - if (!HddsUtils.getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) { - - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY - + " should be set in ozone-site.xml or with the --scm option"); - } - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java index d4da5227d47f..f9321ab5cf2f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java @@ -19,11 +19,12 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.admin.OzoneAdmin; import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -35,6 +36,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import org.apache.ratis.protocol.ClientId; +import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -50,7 +52,8 @@ subcommands = { GetServiceRolesSubcommand.class }) -public class OMAdmin extends GenericCli { +@MetaInfServices(SubcommandWithParent.class) +public class OMAdmin extends GenericCli implements SubcommandWithParent { @CommandLine.ParentCommand private OzoneAdmin parent; @@ -102,4 +105,9 @@ public OzoneManagerProtocolClientSideTranslatorPB createOmClient( conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY)); } } + + @Override + public Class getParentType() { + return OzoneAdmin.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index c89793c34b87..4f69da78b905 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -22,12 +22,22 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - +import java.util.Map; +import java.util.HashMap; +import java.util.HashSet; +import 
com.google.gson.GsonBuilder; +import com.google.gson.Gson; +import com.google.gson.JsonObject; +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; import org.apache.hadoop.ozone.OzoneConsts; @@ -43,12 +53,7 @@ import org.apache.hadoop.ozone.shell.keys.KeyHandler; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; - -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import org.apache.ratis.protocol.ClientId; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Parameters; @@ -58,7 +63,9 @@ @Command(name = "chunkinfo", description = "returns chunk location" + " information about an existing key") -public class ChunkKeyHandler extends KeyHandler { +@MetaInfServices(SubcommandWithParent.class) +public class ChunkKeyHandler extends KeyHandler implements + SubcommandWithParent { @Parameters(arity = "1..1", description = "key to be located") private String uri; @@ -66,7 +73,6 @@ public class ChunkKeyHandler extends KeyHandler { private ContainerOperationClient containerOperationClient; private XceiverClientManager xceiverClientManager; private XceiverClientSpi xceiverClient; - private final ClientId clientId = ClientId.randomId(); private OzoneManagerProtocol ozoneManagerClient; private String getChunkLocationPath(String containerLocation) { @@ -75,22 +81,22 @@ private String getChunkLocationPath(String containerLocation) { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException, OzoneClientException{ containerOperationClient = new - ContainerOperationClient(createOzoneConfiguration()); + ContainerOperationClient(createOzoneConfiguration()); xceiverClientManager = containerOperationClient - .getXceiverClientManager(); + .getXceiverClientManager(); ozoneManagerClient = client.getObjectStore().getClientProxy() .getOzoneManagerClient(); address.ensureKeyAddress(); - JsonObject jsonObj = new JsonObject(); JsonElement element; + JsonObject result = new JsonObject(); String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); String keyName = address.getKeyName(); List tempchunks = null; List chunkDetailsList = new ArrayList(); - List chunkPaths = new ArrayList(); + HashSet chunkPaths = new HashSet<>(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -98,19 +104,31 @@ protected void execute(OzoneClient client, OzoneAddress address) .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); - List locationInfos = keyInfo - .getLatestVersionLocations().getBlocksLatestVersionOnly(); // querying the keyLocations.The OM is queried to get containerID and // localID pertaining to a given key + List 
locationInfos = keyInfo + .getLatestVersionLocations().getBlocksLatestVersionOnly(); + // for zero-sized key + if(locationInfos.isEmpty()){ + System.out.println("No Key Locations Found"); + return; + } ChunkLayOutVersion chunkLayOutVersion = ChunkLayOutVersion .getConfiguredVersion(getConf()); + JsonArray responseArrayList = new JsonArray(); for (OmKeyLocationInfo keyLocation:locationInfos) { ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo(); ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo(); long containerId = keyLocation.getContainerID(); + chunkPaths.clear(); Token token = keyLocation.getToken(); + Pipeline pipeline = keyLocation.getPipeline(); + if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { + pipeline = Pipeline.newBuilder(pipeline) + .setType(HddsProtos.ReplicationType.STAND_ALONE).build(); + } xceiverClient = xceiverClientManager - .acquireClient(keyLocation.getPipeline()); + .acquireClientForReadData(pipeline); // Datanode is queried to get chunk information.Thus querying the // OM,SCM and datanode helps us get chunk location information if (token != null) { @@ -118,57 +136,71 @@ protected void execute(OzoneClient client, OzoneAddress address) } ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID() .getDatanodeBlockIDProtobuf(); - ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls - .getBlock(xceiverClient, datanodeBlockID); - tempchunks = response.getBlockData().getChunksList(); - ContainerProtos.ContainerDataProto containerData = - containerOperationClient.readContainer( - keyLocation.getContainerID(), - keyLocation.getPipeline()); - for (ContainerProtos.ChunkInfo chunkInfo:tempchunks) { - ChunkDetails chunkDetails = new ChunkDetails(); - chunkDetails.setChunkName(chunkInfo.getChunkName()); - chunkDetails.setChunkOffset(chunkInfo.getOffset()); - chunkDetailsList.add(chunkDetails); - chunkPaths.add(chunkLayOutVersion.getChunkFile(new File( - getChunkLocationPath(containerData.getContainerPath())), - keyLocation.getBlockID(), - ChunkInfo.getFromProtoBuf(chunkInfo)).toString()); - } - containerChunkInfoVerbose - .setContainerPath(containerData.getContainerPath()); - containerChunkInfoVerbose - .setDataNodeList(keyLocation.getPipeline().getNodes()); - containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline()); - containerChunkInfoVerbose.setChunkInfos(chunkDetailsList); - containerChunkInfo.setFiles(chunkPaths); - List chunkDataNodeDetails = new - ArrayList(); - for (DatanodeDetails datanodeDetails:keyLocation - .getPipeline().getNodes()) { - chunkDataNodeDetails.add( - new ChunkDataNodeDetails(datanodeDetails.getIpAddress(), - datanodeDetails.getHostName())); + // doing a getBlock on all nodes + HashMap + responses = null; + try { + responses = ContainerProtocolCalls + .getBlockFromAllNodes(xceiverClient, datanodeBlockID); + } catch (InterruptedException e) { + LOG.error("Execution interrupted due to " + e); } - containerChunkInfo.setChunkDataNodeDetails(chunkDataNodeDetails); - containerChunkInfo.setPipelineID( - keyLocation.getPipeline().getId().getId()); - Gson gson = new GsonBuilder().create(); - if (isVerbose()) { - element = gson.toJsonTree(containerChunkInfoVerbose); - jsonObj.add("container Id :" + containerId + " " - + "blockId :" + keyLocation.getLocalID() + "", element); - } else { - element = gson.toJsonTree(containerChunkInfo); - jsonObj.add("container Id :" + containerId + " " - + "blockId :" + keyLocation.getLocalID() + "", element); + JsonArray 
responseFromAllNodes = new JsonArray(); + for (Map.Entry + entry: responses.entrySet()) { + JsonObject jsonObj = new JsonObject(); + if (entry.getValue() == null) { + LOG.error("Can't execute getBlock on this node"); + continue; + } + tempchunks = entry.getValue().getBlockData().getChunksList(); + ContainerProtos.ContainerDataProto containerData = + containerOperationClient.readContainer( + keyLocation.getContainerID(), + keyLocation.getPipeline()); + for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) { + String fileName = chunkLayOutVersion.getChunkFile(new File( + getChunkLocationPath(containerData.getContainerPath())), + keyLocation.getBlockID(), + ChunkInfo.getFromProtoBuf(chunkInfo)).toString(); + chunkPaths.add(fileName); + ChunkDetails chunkDetails = new ChunkDetails(); + chunkDetails.setChunkName(fileName); + chunkDetails.setChunkOffset(chunkInfo.getOffset()); + chunkDetailsList.add(chunkDetails); + } + containerChunkInfoVerbose + .setContainerPath(containerData.getContainerPath()); + containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline()); + containerChunkInfoVerbose.setChunkInfos(chunkDetailsList); + containerChunkInfo.setFiles(chunkPaths); + containerChunkInfo.setPipelineID( + keyLocation.getPipeline().getId().getId()); + Gson gson = new GsonBuilder().create(); + if (isVerbose()) { + element = gson.toJsonTree(containerChunkInfoVerbose); + } else { + element = gson.toJsonTree(containerChunkInfo); + } + jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName()); + jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress()); + jsonObj.addProperty("Container-ID", containerId); + jsonObj.addProperty("Block-ID", keyLocation.getLocalID()); + jsonObj.add("Locations", element); + responseFromAllNodes.add(jsonObj); + xceiverClientManager.releaseClientForReadData(xceiverClient, false); } + responseArrayList.add(responseFromAllNodes); } - xceiverClientManager.releaseClient(xceiverClient, false); - Gson gson = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = gson.toJson(jsonObj); + result.add("KeyLocations", responseArrayList); + Gson gson2 = new GsonBuilder().setPrettyPrinting().create(); + String prettyJson = gson2.toJson(result); System.out.println(prettyJson); } + @Override + public Class getParentType() { + return OzoneDebug.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java index 0e969c7dcf80..cf57d95397bb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java @@ -19,9 +19,10 @@ package org.apache.hadoop.ozone.debug; import com.fasterxml.jackson.annotation.JsonInclude; + + import java.util.HashSet; import java.util.List; import java.util.UUID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; /** @@ -30,19 +31,12 @@ @JsonInclude(JsonInclude.Include.NON_NULL) public class ContainerChunkInfo { private String containerPath; - private List dataNodeList; private List chunkInfos; - private List files; - private List chunkDataNodeDetails; + private HashSet files; private UUID pipelineID; private Pipeline pipeline; - public void setChunkDataNodeDetails(List - chunkDataNodeDetails) { - this.chunkDataNodeDetails = chunkDataNodeDetails; - } - - public void setFiles(List files) { + public void
setFiles(HashSet files) { this.files = files; } @@ -66,9 +60,6 @@ public void setChunkInfos(List chunkInfos) { this.chunkInfos = chunkInfos; } - public void setDataNodeList(List dataNodeList) { - this.dataNodeList = dataNodeList; - } @Override public String toString() { @@ -76,8 +67,6 @@ public String toString() { + "containerPath='" + containerPath + '\'' - + ", dataNodeList=" - + dataNodeList + ", chunkInfos=" + chunkInfos + ", pipeline=" @@ -85,8 +74,6 @@ public String toString() { + '}' + "files=" + files - + "chunkdatanodeDetails=" - + chunkDataNodeDetails + "PipelineID=" + pipelineID; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java index 3f28a64e84bc..d9d0d704d85e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java @@ -18,10 +18,16 @@ package org.apache.hadoop.ozone.debug; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_KEY_DB; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; + import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.om.codec.OMDBDefinition; +import org.apache.hadoop.ozone.recon.scm.ReconSCMDBDefinition; +import org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition; +import java.util.Arrays; import java.util.HashMap; /** @@ -36,14 +42,26 @@ private DBDefinitionFactory() { static { dbMap = new HashMap<>(); - dbMap.put(new SCMDBDefinition().getName(), new SCMDBDefinition()); - dbMap.put(new OMDBDefinition().getName(), new OMDBDefinition()); + Arrays.asList( + new SCMDBDefinition(), + new OMDBDefinition(), + new ReconSCMDBDefinition() + ).forEach(dbDefinition -> dbMap.put(dbDefinition.getName(), dbDefinition)); } public static DBDefinition getDefinition(String dbName){ if (dbMap.containsKey(dbName)){ return dbMap.get(dbName); } + return getReconDBDefinition(dbName); + } + + private static DBDefinition getReconDBDefinition(String dbName){ + if (dbName.startsWith(RECON_CONTAINER_KEY_DB)) { + return new ReconDBDefinition(dbName); + } else if (dbName.startsWith(RECON_OM_SNAPSHOT_DB)) { + return new OMDBDefinition(); + } return null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 47fc8bc9cfb2..b1139df9595e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -18,19 +18,30 @@ package org.apache.hadoop.ozone.debug; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; -import org.apache.hadoop.hdds.utils.db.DBDefinition; -import org.apache.hadoop.ozone.OzoneConsts; -import org.rocksdb.*; -import picocli.CommandLine; import java.io.File; import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import 
org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.ozone.OzoneConsts; + +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import org.kohsuke.MetaInfServices; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.Options; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksIterator; +import picocli.CommandLine; + /** * Parser for scm.db file. */ @@ -38,36 +49,74 @@ name = "scan", description = "Parse specified metadataTable" ) -public class DBScanner implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class DBScanner implements Callable, SubcommandWithParent { @CommandLine.Option(names = {"--column_family"}, - description = "Table name") + required = true, + description = "Table name") private String tableName; + @CommandLine.Option(names = {"--with-keys"}, + description = "List Key -> Value instead of just Value.", + defaultValue = "false", + showDefaultValue = CommandLine.Help.Visibility.ALWAYS) + private static boolean withKey; + + @CommandLine.Option(names = {"--length", "-l"}, + description = "Maximum number of items to list") + private static int limit = 100; + @CommandLine.ParentCommand private RDBParser parent; private HashMap columnFamilyMap; - private static void displayTable(RocksDB rocksDB, - DBColumnFamilyDefinition dbColumnFamilyDefinition, - List list) throws IOException { - ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( - dbColumnFamilyDefinition.getTableName() - .getBytes(StandardCharsets.UTF_8), list); - if (columnFamilyHandle==null){ - throw new IllegalArgumentException("columnFamilyHandle is null"); - } - RocksIterator iterator = rocksDB.newIterator(columnFamilyHandle); + private List scannedObjects; + + private static List displayTable(RocksIterator iterator, + DBColumnFamilyDefinition dbColumnFamilyDefinition) throws IOException { + List outputs = new ArrayList<>(); iterator.seekToFirst(); - while (iterator.isValid()){ + while (iterator.isValid() && limit > 0){ + StringBuilder result = new StringBuilder(); + if (withKey) { + Object key = dbColumnFamilyDefinition.getKeyCodec() + .fromPersistedFormat(iterator.key()); + Gson gson = new GsonBuilder().setPrettyPrinting().create(); + result.append(gson.toJson(key)); + result.append(" -> "); + } Object o = dbColumnFamilyDefinition.getValueCodec() .fromPersistedFormat(iterator.value()); + outputs.add(o); Gson gson = new GsonBuilder().setPrettyPrinting().create(); - String result = gson.toJson(o); - System.out.println(result); + result.append(gson.toJson(o)); + System.out.println(result.toString()); + limit--; iterator.next(); } + return outputs; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public RDBParser getParent() { + return parent; + } + + public void setParent(RDBParser parent) { + this.parent = parent; + } + + public static void setLimit(int limit) { + DBScanner.limit = limit; + } + + public List getScannedObjects() { + return scannedObjects; } private static ColumnFamilyHandle getColumnFamilyHandle( @@ -123,6 +172,10 @@ public Void call() throws Exception { private void printAppropriateTable( List columnFamilyHandleList, RocksDB rocksDB, String dbPath) throws IOException { + if (limit < 1) { + throw new IllegalArgumentException( + "List length should be a positive number"); + } dbPath = removeTrailingSlashIfNeeded(dbPath); this.constructColumnFamilyMap(DBDefinitionFactory. 
getDefinition(new File(dbPath).getName())); @@ -132,7 +185,15 @@ private void printAppropriateTable( } else { DBColumnFamilyDefinition columnFamilyDefinition = this.columnFamilyMap.get(tableName); - displayTable(rocksDB, columnFamilyDefinition, columnFamilyHandleList); + ColumnFamilyHandle columnFamilyHandle = getColumnFamilyHandle( + columnFamilyDefinition.getTableName() + .getBytes(StandardCharsets.UTF_8), + columnFamilyHandleList); + if (columnFamilyHandle == null) { + throw new IllegalArgumentException("columnFamilyHandle is null"); + } + RocksIterator iterator = rocksDB.newIterator(columnFamilyHandle); + scannedObjects = displayTable(iterator, columnFamilyDefinition); } } else { System.out.println("Incorrect db Path"); @@ -145,4 +206,10 @@ private String removeTrailingSlashIfNeeded(String dbPath) { } return dbPath; } + + @Override + public Class getParentType() { + return RDBParser.class; + } } + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java index 5aa5ed2ad4d3..be1cd592d7cf 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java @@ -18,14 +18,17 @@ package org.apache.hadoop.ozone.debug; -import org.rocksdb.Options; -import org.rocksdb.RocksDB; -import picocli.CommandLine; - import java.nio.charset.StandardCharsets; import java.util.List; import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; + +import org.kohsuke.MetaInfServices; +import org.rocksdb.Options; +import org.rocksdb.RocksDB; +import picocli.CommandLine; + /** * List all column Families/Tables in db. */ @@ -34,7 +37,8 @@ aliases = "ls", description = "list all column families in db." ) -public class ListTables implements Callable { +@MetaInfServices(SubcommandWithParent.class) +public class ListTables implements Callable, SubcommandWithParent { @CommandLine.ParentCommand private RDBParser parent; @@ -48,4 +52,9 @@ public Void call() throws Exception { } return null; } + + @Override + public Class getParentType() { + return RDBParser.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java index 82808d6acd92..9647396f624a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.ozone.segmentparser.RatisLogParser; + import picocli.CommandLine; /** @@ -29,14 +29,13 @@ @CommandLine.Command(name = "ozone debug", description = "Developer tools for Ozone Debug operations", versionProvider = HddsVersionProvider.class, - subcommands = { - ChunkKeyHandler.class, - RatisLogParser.class, - RDBParser.class - }, mixinStandardHelpOptions = true) public class OzoneDebug extends GenericCli { + public OzoneDebug() { + super(OzoneDebug.class); + } + /** * Main for the Ozone Debug shell Command handling. 
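The @MetaInfServices(SubcommandWithParent.class) annotations used throughout these handlers publish each class under META-INF/services, so top-level commands such as OzoneDebug and RDBParser no longer list their subcommands by hand. The actual wiring lives in GenericCli, which is not part of this diff; the sketch below only illustrates the ServiceLoader discovery step those annotations make possible, with the printout standing in for whatever registration the CLI performs, and the class name is hypothetical.

```java
import java.util.ServiceLoader;

import org.apache.hadoop.hdds.cli.SubcommandWithParent;
import org.apache.hadoop.ozone.debug.OzoneDebug;

/** Illustrative discovery of ServiceLoader-registered subcommands. */
public final class SubcommandDiscoverySketch {
  public static void main(String[] args) {
    // @MetaInfServices generates the META-INF/services entries that make
    // these implementations visible to the ServiceLoader.
    for (SubcommandWithParent sub
        : ServiceLoader.load(SubcommandWithParent.class)) {
      if (OzoneDebug.class.equals(sub.getParentType())) {
        System.out.println("subcommand of 'ozone debug': "
            + sub.getClass().getSimpleName());
      }
    }
  }
}
```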
* diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java index ae82ba110353..f133386ab13f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java @@ -18,31 +18,49 @@ package org.apache.hadoop.ozone.debug; +import java.util.concurrent.Callable; + import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; + +import org.kohsuke.MetaInfServices; import picocli.CommandLine; +import picocli.CommandLine.Model.CommandSpec; +import picocli.CommandLine.Spec; /** * Tool that parses rocksdb file. */ @CommandLine.Command( name = "ldb", - description = "Parse rocksdb file content", - subcommands = { - DBScanner.class, - ListTables.class - }) -public class RDBParser extends GenericCli { + description = "Parse rocksdb file content") +@MetaInfServices(SubcommandWithParent.class) +public class RDBParser implements Callable, SubcommandWithParent { + + @Spec + private CommandSpec spec; @CommandLine.Option(names = {"--db"}, - description = "Database File Path") - private String dbPath; + required = true, + description = "Database File Path") + private String dbPath; public String getDbPath() { return dbPath; } + public void setDbPath(String dbPath) { + this.dbPath = dbPath; + } + + @Override + public Class getParentType() { + return OzoneDebug.class; + } + @Override - public void execute(String[] argv) { - new RDBParser().run(argv); + public Void call() throws Exception { + GenericCli.missingSubcommand(spec); + return null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index aa8282e57a35..1cfff127097c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -61,6 +61,7 @@ import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import org.apache.ratis.protocol.ClientId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -301,6 +302,16 @@ public void printReport() { messages.forEach(print); } + /** + * Print out reports with the given message. + */ + public void print(String msg){ + Consumer print = freonCommand.isInteractive() + ? System.out::println + : LOG::info; + print.accept(msg); + } + /** * Create the OM RPC client to use it for testing. 
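The createOmClient() change that follows lets Freon omit --om-service-id on HA clusters with a single OM service: when the caller passes null and exactly one id is configured under ozone.om.service.ids, that id is used. The same rule, isolated as a small helper for illustration (class and method names here are hypothetical):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;

/** Illustrative helper mirroring the fallback applied in createOmClient(). */
final class OmServiceIdFallback {

  private OmServiceIdFallback() {
  }

  static String resolve(String requested, OzoneConfiguration conf) {
    if (requested != null) {
      return requested;                        // explicit id always wins
    }
    String[] configured = conf.getTrimmedStrings(OZONE_OM_SERVICE_IDS_KEY);
    // Only an unambiguous (single entry) configuration is used implicitly.
    return configured.length == 1 ? configured[0] : null;
  }
}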
*/ @@ -310,6 +321,17 @@ public OzoneManagerProtocolClientSideTranslatorPB createOmClient( RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, ProtobufRpcEngine.class); String clientId = ClientId.randomId().toString(); + + if (omServiceID == null) { + + //if only one serviceId is configured, use that + final String[] configuredServiceIds = + conf.getTrimmedStrings(OZONE_OM_SERVICE_IDS_KEY); + if (configuredServiceIds.length == 1) { + omServiceID = configuredServiceIds[0]; + } + } + OmTransport transport = OmTransportFactory.create(conf, ugi, omServiceID); return new OzoneManagerProtocolClientSideTranslatorPB(transport, clientId); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java index c6ec60e59a25..542634c4884b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java @@ -20,6 +20,7 @@ import java.io.OutputStream; import java.nio.charset.StandardCharsets; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.RandomStringUtils; /** @@ -38,15 +39,25 @@ public class ContentGenerator { */ private int bufferSize; + /** + * Number of bytes to write in one call. + *
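With the new copyBufferSize parameter, ContentGenerator still fills the key from a bufferSize-byte random buffer, but each chunk is pushed to the stream in copyBufferSize-sized slices (byte by byte when it is 1). A worked example of the resulting write pattern, mirroring the writeWithDistinctSizes test added further below (the example class and its location in the freon package are illustrative, needed only because the constructor is package-private):

package org.apache.hadoop.ozone.freon;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

/** Worked example of the copy-buffer behaviour of ContentGenerator. */
public final class ContentGeneratorExample {

  public static void main(String[] args) throws IOException {
    // keySize=20, bufferSize=8, copyBufferSize=3: chunk sizes run 8, 8, 4;
    // with a 3-byte copy buffer the writes are 3+3+2, 3+3+2 and 3+1,
    // so the stream receives 20 bytes of repeated buffer content in total.
    ContentGenerator generator = new ContentGenerator(20, 8, 3);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    generator.write(out);
    System.out.println(out.size());   // 20
  }
}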
+ * Should be no larger than the bufferSize. + */ + private final int copyBufferSize; + private final byte[] buffer; ContentGenerator(long keySize, int bufferSize) { + this(keySize, bufferSize, bufferSize); + } + + ContentGenerator(long keySize, int bufferSize, int copyBufferSize) { this.keySize = keySize; this.bufferSize = bufferSize; - + this.copyBufferSize = copyBufferSize; buffer = RandomStringUtils.randomAscii(bufferSize) .getBytes(StandardCharsets.UTF_8); - } /** @@ -56,7 +67,21 @@ public void write(OutputStream outputStream) throws IOException { for (long nrRemaining = keySize; nrRemaining > 0; nrRemaining -= bufferSize) { int curSize = (int) Math.min(bufferSize, nrRemaining); - outputStream.write(buffer, 0, curSize); + if (copyBufferSize == 1) { + for (int i = 0; i < curSize; i++) { + outputStream.write(buffer[i]); + } + } else { + for (int i = 0; i < curSize; i += copyBufferSize) { + outputStream.write(buffer, i, + Math.min(copyBufferSize, curSize - i)); + } + } } } + + @VisibleForTesting + byte[] getBuffer() { + return buffer; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java new file mode 100644 index 000000000000..7300fa5441eb --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.freon; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Callable; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.ozone.OzoneSecurityUtil; +import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChecksumData; + +import com.codahale.metrics.Timer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +/** + * Data validator of chunks to use pure datanode XCeiver interface. + */ +@Command(name = "dcv", + aliases = "datanode-chunk-validator", + description = "Validate generated Chunks are the same ", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true, + showDefaultValues = true) +public class DatanodeChunkValidator extends BaseFreonGenerator + implements Callable { + + private static final Logger LOG = + LoggerFactory.getLogger(DatanodeChunkValidator.class); + + @Option(names = {"-l", "--pipeline"}, + description = "Pipeline to use. 
By default the first RATIS/THREE " + + "pipeline will be used.", + defaultValue = "") + private String pipelineId; + + @Option(names = {"-s", "--size"}, + description = "Size of the generated chunks (in bytes)", + defaultValue = "1024") + private int chunkSize; + + private XceiverClientSpi xceiverClientSpi; + + private Timer timer; + + private ChecksumData checksumReference; + + private Checksum checksum; + + + @Override + public Void call() throws Exception { + + init(); + + OzoneConfiguration ozoneConf = createOzoneConfiguration(); + if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) { + throw new IllegalArgumentException( + "Datanode chunk validator is not supported in secure environment" + ); + } + + try (StorageContainerLocationProtocol scmLocationClient = + createStorageContainerLocationClient(ozoneConf)) { + List pipelines = scmLocationClient.listPipelines(); + Pipeline pipeline; + if (pipelineId != null && pipelineId.length() > 0) { + pipeline = pipelines.stream() + .filter(p -> p.getId().toString().equals(pipelineId)) + .findFirst() + .orElseThrow(() -> new IllegalArgumentException( + "Pipeline ID is defined, but there is no such pipeline: " + + pipelineId)); + + } else { + pipeline = pipelines.stream() + .filter(p -> p.getFactor() == HddsProtos.ReplicationFactor.THREE) + .findFirst() + .orElseThrow(() -> new IllegalArgumentException( + "Pipeline ID is NOT defined, and no pipeline " + + "has been found with factor=THREE")); + LOG.info("Using pipeline {}", pipeline.getId()); + } + + try (XceiverClientManager xceiverClientManager = + new XceiverClientManager(ozoneConf)) { + xceiverClientSpi = xceiverClientManager.acquireClient(pipeline); + + readReference(); + + timer = getMetrics().timer("chunk-validate"); + + runTests(this::validateChunk); + } + + } finally { + if (xceiverClientSpi != null) { + xceiverClientSpi.close(); + } + } + return null; + } + + /** + * Read a reference chunk using same name than one from the + * {@link org.apache.hadoop.ozone.freon.DatanodeChunkGenerator}. 
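readReference() and validateChunk() below recompute the coordinates that DatanodeChunkGenerator is expected to have used when writing: chunk names carry the test prefix and step number, local block ids cycle through 20 values, and the offset advances by one chunk size per completed cycle of 20. A compact restatement of that mapping (the helper class is illustrative only):

/** Compact restatement of the chunk addressing used by the validator. */
final class ChunkCoordinates {

  private ChunkCoordinates() {
  }

  static String chunkName(String prefix, long stepNo) {
    return prefix + "_testdata_chunk_" + stepNo;
  }

  static long localId(long stepNo) {
    return stepNo % 20;                        // 20 chunks per local block
  }

  static long offset(long stepNo, int chunkSize) {
    // e.g. stepNo = 57, chunkSize = 1024 -> localId 17, offset 2 * 1024 = 2048
    return (stepNo / 20) * chunkSize;
  }
}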
+ * @throws IOException + */ + private void readReference() throws IOException { + ContainerProtos.DatanodeBlockID blockId = + ContainerProtos.DatanodeBlockID.newBuilder() + .setContainerID(1L) + .setLocalID(0 % 20) + .setBlockCommitSequenceId(0) + .build(); + + // As a reference, the first one generated (at step 0) is taken + ContainerProtos.ChunkInfo chunkInfo = ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(getPrefix() + "_testdata_chunk_" + 0) + .setOffset((0 / 20) * chunkSize) + .setLen(chunkSize) + .setChecksumData( + ContainerProtos.ChecksumData.newBuilder() + .setBytesPerChecksum(4) + .setType(ContainerProtos.ChecksumType.CRC32) + .build()) + .build(); + + ContainerProtos.ReadChunkRequestProto.Builder readChunkRequest = + ContainerProtos.ReadChunkRequestProto + .newBuilder() + .setBlockID(blockId) + .setChunkData(chunkInfo); + + String id = xceiverClientSpi.getPipeline().getFirstNode().getUuidString(); + + ContainerProtos.ContainerCommandRequestProto.Builder builder = + ContainerProtos.ContainerCommandRequestProto + .newBuilder() + .setCmdType(ContainerProtos.Type.ReadChunk) + .setContainerID(blockId.getContainerID()) + .setDatanodeUuid(id) + .setReadChunk(readChunkRequest); + + ContainerProtos.ContainerCommandRequestProto request = builder.build(); + ContainerProtos.ContainerCommandResponseProto response = + xceiverClientSpi.sendCommand(request); + + checksum = new Checksum(ContainerProtos.ChecksumType.CRC32, chunkSize); + checksumReference = checksum.computeChecksum( + response.getReadChunk().getData().toByteArray() + ); + + } + + + private void validateChunk(long stepNo) throws Exception { + ContainerProtos.DatanodeBlockID blockId = + ContainerProtos.DatanodeBlockID.newBuilder() + .setContainerID(1L) + .setLocalID(stepNo % 20) + .setBlockCommitSequenceId(stepNo) + .build(); + + ContainerProtos.ChunkInfo chunkInfo = ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(getPrefix() + "_testdata_chunk_" + stepNo) + .setChecksumData( + ContainerProtos.ChecksumData.newBuilder() + .setBytesPerChecksum(4) + .setType(ContainerProtos.ChecksumType.CRC32) + .build()) + .setOffset((stepNo / 20) * chunkSize) + .setLen(chunkSize) + .build(); + + ContainerProtos.ReadChunkRequestProto.Builder readChunkRequest = + ContainerProtos.ReadChunkRequestProto + .newBuilder() + .setBlockID(blockId) + .setChunkData(chunkInfo); + + String id = xceiverClientSpi.getPipeline().getFirstNode().getUuidString(); + + ContainerProtos.ContainerCommandRequestProto.Builder builder = + ContainerProtos.ContainerCommandRequestProto + .newBuilder() + .setCmdType(ContainerProtos.Type.ReadChunk) + .setContainerID(blockId.getContainerID()) + .setDatanodeUuid(id) + .setReadChunk(readChunkRequest); + + ContainerProtos.ContainerCommandRequestProto request = builder.build(); + + timer.time(() -> { + try { + ContainerProtos.ContainerCommandResponseProto response = + xceiverClientSpi.sendCommand(request); + + ChecksumData checksumOfChunk = + checksum.computeChecksum( + response.getReadChunk().getData().toByteArray() + ); + + if (!checksumReference.equals(checksumOfChunk)) { + throw new IllegalStateException( + "Reference (=first) message checksum doesn't match " + + "with checksum of chunk " + + response.getReadChunk() + .getChunkData().getChunkName()); + } + } catch (IOException e) { + LOG.warn("Could not read chunk due to IOException: ", e); + } + }); + + } + + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index 1eb1b6874129..1b03540019bc 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -48,6 +48,7 @@ SameKeyReader.class, S3KeyGenerator.class, DatanodeChunkGenerator.class, + DatanodeChunkValidator.class, DatanodeBlockPutter.class, FollowerAppendLogEntryGenerator.class, ChunkManagerDiskWrite.class, @@ -58,6 +59,10 @@ public class Freon extends GenericCli { public static final Logger LOG = LoggerFactory.getLogger(Freon.class); + public Freon() { + super(Freon.class); + } + @Option(names = "--server", description = "Enable internal http server to provide metric " + "and profile endpoint") diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java index 62a49655f3c7..348aa244d344 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java @@ -97,17 +97,24 @@ public class HadoopDirTreeGenerator extends BaseFreonGenerator @Override public Void call() throws Exception { - - init(); - OzoneConfiguration configuration = createOzoneConfiguration(); - fileSystem = FileSystem.get(URI.create(rootPath), configuration); - - contentGenerator = new ContentGenerator(fileSizeInBytes, bufferSize); - timer = getMetrics().timer("file-create"); - - runTests(this::createDir); + String s; + if (depth <= 0) { + s = "Invalid depth value, depth value should be greater than zero!"; + print(s); + } else if (span <= 0) { + s = "Invalid span value, span value should be greater than zero!"; + print(s); + } else { + init(); + OzoneConfiguration configuration = createOzoneConfiguration(); + fileSystem = FileSystem.get(URI.create(rootPath), configuration); + + contentGenerator = new ContentGenerator(fileSizeInBytes, bufferSize); + timer = getMetrics().timer("file-create"); + + runTests(this::createDir); + } return null; - } /* @@ -139,21 +146,14 @@ public Void call() throws Exception { created. */ private void createDir(long counter) throws Exception { - if (depth <= 0) { - LOG.info("Invalid depth value, at least one depth should be passed!"); - return; - } - if (span <= 0) { - LOG.info("Invalid span value, at least one span should be passed!"); - return; - } String dir = makeDirWithGivenNumberOfFiles(rootPath); if (depth > 1) { createSubDirRecursively(dir, 1, 1); } - System.out.println("Successfully created directories & files. Total Dirs " + + String message = "Successfully created directories & files. 
Total Dirs " + "Count=" + totalDirsCnt.get() + ", Total Files Count=" + - timer.getCount()); + timer.getCount(); + print(message); } private void createSubDirRecursively(String parent, int depthIndex, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java index 548f829fba44..925ba7dc2e96 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java @@ -57,10 +57,15 @@ public class HadoopFsGenerator extends BaseFreonGenerator private int fileSize; @Option(names = {"--buffer"}, - description = "Size of buffer used to generated the key content.", - defaultValue = "4096") + description = "Size of buffer used store the generated key content", + defaultValue = "10240") private int bufferSize; + @Option(names = {"--copy-buffer"}, + description = "Size of bytes written to the output in one operation", + defaultValue = "4096") + private int copyBufferSize; + private ContentGenerator contentGenerator; private Timer timer; @@ -76,7 +81,8 @@ public Void call() throws Exception { fileSystem = FileSystem.get(URI.create(rootPath), configuration); - contentGenerator = new ContentGenerator(fileSize, bufferSize); + contentGenerator = + new ContentGenerator(fileSize, bufferSize, copyBufferSize); timer = getMetrics().timer("file-create"); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java index 72d096c227f7..8bc8a37708ce 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java @@ -72,13 +72,20 @@ public class HadoopNestedDirGenerator extends BaseFreonGenerator @Override public Void call() throws Exception { - - init(); - OzoneConfiguration configuration = createOzoneConfiguration(); - fileSystem = FileSystem.get(URI.create(rootPath), configuration); - runTests(this::createDir); + String s; + if (depth <= 0) { + s = "Invalid depth value, depth value should be greater than zero!"; + print(s); + } else if (span < 0) { + s = "Invalid span value, span value should be greater or equal to zero!"; + print(s); + } else { + init(); + OzoneConfiguration configuration = createOzoneConfiguration(); + fileSystem = FileSystem.get(URI.create(rootPath), configuration); + runTests(this::createDir); + } return null; - } /* @@ -109,5 +116,8 @@ private void createDir(long counter) throws Exception { Path dir = new Path(rootPath.concat("/").concat(childDir)); fileSystem.mkdirs(dir.getParent()); } + String message = "\nSuccessfully created directories. 
" + + "Total Directories with level = " + depth + " and span = " + span; + print(message); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java index 81ff0ea831ab..fe73e35db3ab 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java @@ -93,7 +93,7 @@ public static void main(String[] args) throws IOException { Table.KeyValue keyValue = keyValueTableIterator.next(); OmKeyInfo omKeyInfo = keyValue.getValue(); - byte[] value = omKeyInfo.getProtobuf().toByteArray(); + byte[] value = omKeyInfo.getProtobuf(true).toByteArray(); OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf( OzoneManagerProtocolProtos.KeyInfo.parseFrom(value)); for (OmKeyLocationInfoGroup keyLocationInfoGroup : keyInfo diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java index 173541944858..8c34a4bbb881 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java @@ -19,6 +19,10 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.ozone.debug.OzoneDebug; + +import org.kohsuke.MetaInfServices; import picocli.CommandLine; /** @@ -35,7 +39,8 @@ }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class RatisLogParser extends GenericCli { +@MetaInfServices(SubcommandWithParent.class) +public class RatisLogParser extends GenericCli implements SubcommandWithParent { @Override public void execute(String[] argv) { @@ -45,4 +50,9 @@ public void execute(String[] argv) { public static void main(String[] args) { new RatisLogParser().run(args); } + + @Override + public Class getParentType() { + return OzoneDebug.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java index 1d753287cf0c..0ddd65762895 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java @@ -21,7 +21,10 @@ import java.io.PrintStream; import java.net.URI; import java.net.URISyntaxException; +import java.util.Collection; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.MutableConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.OzoneClient; @@ -30,10 +33,10 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import com.google.common.annotations.VisibleForTesting; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_HTTP_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RPC_SCHEME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; - import org.apache.http.client.utils.URIBuilder; /** @@ -42,6 +45,7 @@ public class OzoneAddress { private static final int DEFAULT_OZONE_PORT = 
50070; + private static final String EMPTY_HOST = "___DEFAULT___"; private URI ozoneURI; @@ -85,7 +89,32 @@ public OzoneAddress(String address) } - public OzoneClient createClient(OzoneConfiguration conf) + @VisibleForTesting + protected OzoneClient createRpcClient(ConfigurationSource conf) + throws IOException { + return OzoneClientFactory.getRpcClient(conf); + } + + @VisibleForTesting + protected OzoneClient createRpcClientFromHostPort( + String host, + int port, + MutableConfigurationSource conf + ) + throws IOException { + return OzoneClientFactory.getRpcClient(ozoneURI.getHost(), port, conf); + } + + @VisibleForTesting + protected OzoneClient createRpcClientFromServiceId( + String serviceId, + MutableConfigurationSource conf + ) + throws IOException { + return OzoneClientFactory.getRpcClient(serviceId, conf); + } + + public OzoneClient createClient(MutableConfigurationSource conf) throws IOException, OzoneClientException { OzoneClient client; String scheme = ozoneURI.getScheme(); @@ -96,50 +125,62 @@ public OzoneClient createClient(OzoneConfiguration conf) throw new UnsupportedOperationException( "REST schema is not supported any more. Please use AWS S3 protocol " + "if you need REST interface."); - } else if (scheme.equals(OZONE_RPC_SCHEME)) { - if (ozoneURI.getHost() != null && !ozoneURI.getAuthority() - .equals(EMPTY_HOST)) { - if (OmUtils.isOmHAServiceId(conf, ozoneURI.getHost())) { - // When host is an HA service ID - if (ozoneURI.getPort() != -1) { - throw new OzoneClientException( - "Port " + ozoneURI.getPort() + " specified in URI but host '" - + ozoneURI.getHost() + "' is a logical (HA) OzoneManager " - + "and does not use port information."); - } - client = OzoneClientFactory.getRpcClient(ozoneURI.getHost(), conf); - } else if (ozoneURI.getPort() == -1) { - client = OzoneClientFactory.getRpcClient(ozoneURI.getHost(), - OmUtils.getOmRpcPort(conf), conf); - } else { - client = OzoneClientFactory - .getRpcClient(ozoneURI.getHost(), ozoneURI.getPort(), conf); + } else if (!scheme.equals(OZONE_RPC_SCHEME)) { + throw new OzoneClientException( + "Invalid URI, unknown protocol scheme: " + scheme + ". 
Use " + + OZONE_RPC_SCHEME + ":// as the scheme"); + } + + if (ozoneURI.getHost() != null && !ozoneURI.getAuthority() + .equals(EMPTY_HOST)) { + if (OmUtils.isOmHAServiceId(conf, ozoneURI.getHost())) { + // When host is an HA service ID + if (ozoneURI.getPort() != -1) { + throw new OzoneClientException( + "Port " + ozoneURI.getPort() + " specified in URI but host '" + + ozoneURI.getHost() + "' is a logical (HA) OzoneManager " + + "and does not use port information."); } + client = createRpcClient(conf); + } else if (ozoneURI.getPort() == -1) { + client = createRpcClientFromHostPort(ozoneURI.getHost(), + OmUtils.getOmRpcPort(conf), conf); } else { - // When host is not specified - if (OmUtils.isServiceIdsDefined(conf)) { - throw new OzoneClientException("Service ID or host name must not" - + " be omitted when ozone.om.service.ids is defined."); - } - client = OzoneClientFactory.getRpcClient(conf); + client = createRpcClientFromHostPort(ozoneURI.getHost(), + ozoneURI.getPort(), conf); + } + } else {// When host is not specified + + Collection omServiceIds = conf.getTrimmedStringCollection( + OZONE_OM_SERVICE_IDS_KEY); + + if (omServiceIds.size() > 1) { + throw new OzoneClientException("Service ID or host name must not" + + " be omitted when multiple ozone.om.service.ids is defined."); + } else if (omServiceIds.size() == 1) { + client = createRpcClientFromServiceId(omServiceIds.iterator().next(), + conf); + } else { + client = createRpcClient(conf); } - } else { - throw new OzoneClientException( - "Invalid URI, unknown protocol scheme: " + scheme); } + return client; } /** * Create OzoneClient for S3Commands. + * * @param conf * @param omServiceID * @return OzoneClient * @throws IOException * @throws OzoneClientException */ - public OzoneClient createClientForS3Commands(OzoneConfiguration conf, - String omServiceID) + public OzoneClient createClientForS3Commands( + OzoneConfiguration conf, + String omServiceID + ) throws IOException, OzoneClientException { if (omServiceID != null) { // OM HA cluster diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java index 7dad7641d6ba..ab1441c3f80d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java @@ -21,10 +21,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.shell.bucket.BucketCommands; -import org.apache.hadoop.ozone.shell.keys.KeyCommands; -import org.apache.hadoop.ozone.shell.token.TokenCommands; -import org.apache.hadoop.ozone.shell.volume.VolumeCommands; import picocli.CommandLine.Command; @@ -33,16 +29,14 @@ */ @Command(name = "ozone sh", description = "Shell for Ozone object store", - subcommands = { - VolumeCommands.class, - BucketCommands.class, - KeyCommands.class, - TokenCommands.class - }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) public class OzoneShell extends Shell { + public OzoneShell() { + super(OzoneShell.class); + } + /** * Main for the ozShell Command handling. 
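In the OzoneAddress.createClient() rework above, the interesting new case is an address without a host: a single entry in ozone.om.service.ids is now picked up implicitly, several entries are rejected as ambiguous, and none falls back to the plain RPC client. That selection, isolated for illustration (the helper name is hypothetical; the exception text is taken from the change above):

import java.util.Collection;

import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
import org.apache.hadoop.ozone.client.OzoneClientException;

import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;

/** Illustrative helper for the "no host in the URI" branch of createClient. */
final class ImplicitOmServiceId {

  private ImplicitOmServiceId() {
  }

  /** Returns the service id to use, or null for a plain non-HA client. */
  static String pick(MutableConfigurationSource conf)
      throws OzoneClientException {
    Collection<String> ids =
        conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY);
    if (ids.size() > 1) {
      throw new OzoneClientException("Service ID or host name must not"
          + " be omitted when multiple ozone.om.service.ids is defined.");
    }
    return ids.size() == 1 ? ids.iterator().next() : null;
  }
}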
* diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java index 0c71dc82cf3b..2016886a8721 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java @@ -23,7 +23,7 @@ /** * Ozone user interface commands. - * + *
* This class uses dispatch method to make calls * to appropriate handlers that execute the ozone functions. */ @@ -35,6 +35,12 @@ public abstract class Shell extends GenericCli { + "If they are not specified it will be identified from " + "the config files."; + public Shell() { + } + + public Shell(Class type) { + super(type); + } @Override protected void printError(Throwable errorArg) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index ea4ec7096232..ecbf3795ad91 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -23,9 +23,12 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -38,6 +41,7 @@ InfoBucketHandler.class, ListBucketHandler.class, CreateBucketHandler.class, + LinkBucketHandler.class, DeleteBucketHandler.class, AddAclBucketHandler.class, RemoveAclBucketHandler.class, @@ -46,7 +50,9 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class BucketCommands implements GenericParentCommand, Callable { +@MetaInfServices(SubcommandWithParent.class) +public class BucketCommands implements GenericParentCommand, Callable, + SubcommandWithParent { @ParentCommand private Shell shell; @@ -66,4 +72,9 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } + + @Override + public Class getParentType() { + return OzoneShell.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java new file mode 100644 index 000000000000..6671f2da6fb8 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.shell.bucket; + +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.shell.Handler; +import org.apache.hadoop.ozone.shell.OzoneAddress; + +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; + +import java.io.IOException; + +/** + * Creates a symlink to another bucket. + */ +@Command(name = "link", + description = "creates a symlink to another bucket") +public class LinkBucketHandler extends Handler { + + @Parameters(index = "0", arity = "1..1", + description = "The bucket which the link should point to.", + converter = BucketUri.class) + private OzoneAddress source; + + @Parameters(index = "1", arity = "1..1", + description = "Address of the link bucket", + converter = BucketUri.class) + private OzoneAddress target; + + @Override + protected OzoneAddress getAddress() { + return source; + } + + /** + * Executes create bucket. + */ + @Override + public void execute(OzoneClient client, OzoneAddress address) + throws IOException { + + BucketArgs.Builder bb = new BucketArgs.Builder() + .setStorageType(StorageType.DEFAULT) + .setVersioning(false) + .setSourceVolume(source.getVolumeName()) + .setSourceBucket(source.getBucketName()); + + String volumeName = target.getVolumeName(); + String bucketName = target.getBucketName(); + + OzoneVolume vol = client.getObjectStore().getVolume(volumeName); + vol.createBucket(bucketName, bb.build()); + + if (isVerbose()) { + OzoneBucket bucket = vol.getBucket(bucketName); + printObjectAsJson(bucket); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java index 62dd1b26eb33..28af82fab0fd 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java @@ -23,9 +23,12 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -50,7 +53,9 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class KeyCommands implements GenericParentCommand, Callable { +@MetaInfServices(SubcommandWithParent.class) +public class KeyCommands + implements GenericParentCommand, Callable, SubcommandWithParent { @ParentCommand private Shell shell; @@ -70,4 +75,9 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } + + @Override + public Class getParentType() { + return OzoneShell.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java index e0836caa1894..5b449c6cc545 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java @@ -18,16 +18,20 @@ package org.apache.hadoop.ozone.shell.token; +import java.util.concurrent.Callable; + import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; + +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; -import java.util.concurrent.Callable; - /** * Sub-command to group token related operations. */ @@ -41,7 +45,9 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class TokenCommands implements GenericParentCommand, Callable { +@MetaInfServices(SubcommandWithParent.class) +public class TokenCommands + implements GenericParentCommand, Callable, SubcommandWithParent { @ParentCommand private Shell shell; @@ -62,4 +68,8 @@ public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } + @Override + public Class getParentType() { + return OzoneShell.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java index cbeb92a68f20..af0ea1e33701 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java @@ -37,7 +37,7 @@ public class CreateVolumeHandler extends VolumeHandler { @Option(names = {"--user", "-u"}, - description = "Owner of of the volume") + description = "Owner of the volume") private String ownerName; @Option(names = {"--quota", "-q"}, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java index 1b05b48555c1..360d67462a69 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java @@ -23,9 +23,12 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -48,7 +51,9 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class VolumeCommands implements GenericParentCommand, Callable { +@MetaInfServices(SubcommandWithParent.class) +public class VolumeCommands implements GenericParentCommand, Callable, + SubcommandWithParent { @ParentCommand private Shell shell; @@ -68,4 +73,9 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return 
shell.createOzoneConfiguration(); } + + @Override + public Class getParentType() { + return OzoneShell.class; + } } diff --git a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 03680027d539..e444f66e7ce1 100644 --- a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -14,3 +14,4 @@ # limitations under the License. org.apache.hadoop.fs.ozone.OzoneFileSystem +org.apache.hadoop.fs.ozone.RootedOzoneFileSystem diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java new file mode 100644 index 000000000000..f63d14978740 --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.debug; + +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_KEY_DB; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.ozone.om.codec.OMDBDefinition; +import org.apache.hadoop.ozone.recon.scm.ReconSCMDBDefinition; +import org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition; +import org.junit.Test; + +/** + * Simple factory unit test. + */ +public class TestDBDefinitionFactory { + + @Test + public void testGetDefinition() { + DBDefinition definition = + DBDefinitionFactory.getDefinition(new OMDBDefinition().getName()); + assertTrue(definition instanceof OMDBDefinition); + + definition = DBDefinitionFactory.getDefinition( + new SCMDBDefinition().getName()); + assertTrue(definition instanceof SCMDBDefinition); + + definition = DBDefinitionFactory.getDefinition( + new ReconSCMDBDefinition().getName()); + assertTrue(definition instanceof ReconSCMDBDefinition); + + definition = DBDefinitionFactory.getDefinition( + RECON_OM_SNAPSHOT_DB + "_1"); + assertTrue(definition instanceof OMDBDefinition); + + definition = DBDefinitionFactory.getDefinition( + RECON_CONTAINER_KEY_DB + "_1"); + assertTrue(definition instanceof ReconDBDefinition); + } +} \ No newline at end of file diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java new file mode 100644 index 000000000000..d61be3a42dcf --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java @@ -0,0 +1,82 @@ +package org.apache.hadoop.ozone.freon; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +import org.junit.Assert; +import org.junit.Test; + +/** + * Tests for the ContentGenerator class of Freon. + */ +public class TestContentGenerator { + + @Test + public void writeWrite() throws IOException { + ContentGenerator generator = new ContentGenerator(1024, 1024); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + generator.write(output); + Assert.assertArrayEquals(generator.getBuffer(), output.toByteArray()); + } + + @Test + public void writeWithSmallerBuffers() throws IOException { + ContentGenerator generator = new ContentGenerator(10000, 1024, 3); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + generator.write(baos); + + Assert.assertEquals(10000, baos.toByteArray().length); + } + + @Test + public void writeWithByteLevelWrite() throws IOException { + ContentGenerator generator = new ContentGenerator(1024, 1024, 1); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + generator.write(output); + Assert.assertArrayEquals(generator.getBuffer(), output.toByteArray()); + } + + @Test + public void writeWithSmallBuffer() throws IOException { + ContentGenerator generator = new ContentGenerator(1024, 1024, 10); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + generator.write(output); + Assert.assertArrayEquals(generator.getBuffer(), output.toByteArray()); + } + + @Test + public void writeWithDistinctSizes() throws IOException { + ContentGenerator generator = new ContentGenerator(20, 8, 3); + ByteArrayOutputStream output = new ByteArrayOutputStream(); + + generator.write(output); + + byte[] expected = new byte[20]; + byte[] buffer = generator.getBuffer(); + System.arraycopy(buffer, 0, expected, 0, buffer.length); + System.arraycopy(buffer, 0, expected, 8, buffer.length); + System.arraycopy(buffer, 0, expected, 16, 4); + Assert.assertArrayEquals(expected, output.toByteArray()); + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index 9279d7f226d3..e520190e4c95 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -143,7 +143,8 @@ public List handleExecutionException(ExecutionException ex, cmd.parseWithHandlers(new CommandLine.RunLast(), exceptionHandler, args); }catch(Exception ex){ - Assert.assertTrue(ex.getMessage().contains(msg)); + Assert.assertTrue("Expected " + msg + ", but got: " + ex.getMessage(), + ex.getMessage().contains(msg)); } } @@ -225,7 +226,7 @@ public void genconfFailureByInvalidPath() throws Exception { public void genconfPathNotSpecified() throws Exception { File tempPath = getRandomTempDir(); String[] args = new String[]{}; - executeWithException(args, "Missing required parameter: "); + executeWithException(args, "Missing required parameter: ''"); } /** diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java 
b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java index 3ab866c49857..10ba576d8160 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java @@ -39,9 +39,9 @@ public class TestOzoneAddress { @Parameters public static Collection data() { return Arrays.asList(new Object[][] { - {"o3fs://localhost:9878/"}, - {"o3fs://localhost/"}, - {"o3fs:///"}, + {"o3://localhost:9878/"}, + {"o3://localhost/"}, + {"o3:///"}, {"/"}, {""} }); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java new file mode 100644 index 000000000000..1c58a7d52f67 --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.shell; + +import java.io.IOException; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.MutableConfigurationSource; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.hdds.conf.InMemoryConfiguration; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test ozone client creation. 
+ */ +public class TestOzoneAddressClientCreation { + + @Test + public void implicitNonHA() throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("/vol1/bucket1/key1"); + address.createClient(new InMemoryConfiguration()); + Assert.assertTrue(address.simpleCreation); + } + + @Test + public void implicitHAOneServiceId() + throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("/vol1/bucket1/key1"); + address.createClient( + new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1")); + Assert.assertFalse(address.simpleCreation); + Assert.assertEquals("service1", address.serviceId); + } + + @Test(expected = OzoneClientException.class) + public void implicitHaMultipleServiceId() + throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("/vol1/bucket1/key1"); + address.createClient( + new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, + "service1,service2")); + } + + @Test + public void explicitNonHAHostPort() throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); + address.createClient(new InMemoryConfiguration()); + Assert.assertFalse(address.simpleCreation); + Assert.assertEquals("om", address.host); + Assert.assertEquals(9862, address.port); + } + + @Test + public void explicitHAHostPortWithServiceId() + throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); + address.createClient( + new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1")); + Assert.assertFalse(address.simpleCreation); + Assert.assertEquals("om", address.host); + Assert.assertEquals(9862, address.port); + } + + @Test + public void explicitAHostPortWithServiceIds() + throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); + address.createClient( + new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, + "service1,service2")); + Assert.assertFalse(address.simpleCreation); + Assert.assertEquals("om", address.host); + Assert.assertEquals(9862, address.port); + } + + @Test + public void explicitNonHAHost() throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("o3://om/vol1/bucket1/key1"); + address.createClient( + new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1")); + Assert.assertFalse(address.simpleCreation); + Assert.assertEquals("om", address.host); + } + + @Test + public void explicitHAHostPort() throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("o3://om:1234/vol1/bucket1/key1"); + address.createClient(new InMemoryConfiguration()); + Assert.assertFalse(address.simpleCreation); + Assert.assertEquals("om", address.host); + Assert.assertEquals(1234, address.port); + } + + @Test(expected = OzoneClientException.class) + public void explicitWrongScheme() throws OzoneClientException, IOException { + TestableOzoneAddress address = + new TestableOzoneAddress("ssh://host/vol1/bucket1/key1"); + address.createClient(new InMemoryConfiguration()); + } + + /** + * OzoneAddress with modification to make it easier to test. 
+ */ + @SuppressWarnings("checkstyle") + private static class TestableOzoneAddress extends OzoneAddress { + + private String host; + private int port; + private boolean simpleCreation; + private String serviceId; + + TestableOzoneAddress(String address) throws OzoneClientException { + super(address); + } + + TestableOzoneAddress() throws OzoneClientException { + } + + @Override + protected OzoneClient createRpcClient(ConfigurationSource conf) + throws IOException { + simpleCreation = true; + return null; + } + + @Override + protected OzoneClient createRpcClientFromHostPort( + String hostParam, int portParam, MutableConfigurationSource conf + ) throws IOException { + this.host = hostParam; + this.port = portParam; + return null; + } + + @Override + protected OzoneClient createRpcClientFromServiceId( + String serviceIdParam, MutableConfigurationSource conf + ) throws IOException { + this.serviceId = serviceIdParam; + return null; + } + } + +} \ No newline at end of file diff --git a/hadoop-ozone/upgrade/pom.xml b/hadoop-ozone/upgrade/pom.xml deleted file mode 100644 index ed80d9cb0a88..000000000000 --- a/hadoop-ozone/upgrade/pom.xml +++ /dev/null @@ -1,57 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.6.0-SNAPSHOT - - hadoop-ozone-upgrade - 0.6.0-SNAPSHOT - Apache Hadoop Ozone In-Place Upgrade - Apache Hadoop Ozone In-Place Upgrade - jar - - - - org.apache.hadoop - hadoop-hdds-test-utils - test - - - org.apache.hadoop - hadoop-hdds-common - - - com.github.spotbugs - spotbugs - provided - - - junit - junit - test - - - org.mockito - mockito-core - test - - - diff --git a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java b/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java deleted file mode 100644 index b307f44d02e9..000000000000 --- a/hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.upgrade; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; - -import picocli.CommandLine.Command; - -/** - * Command line interface for the In-Place upgrade utility. - *
- * In-Place upgrade can convert HDFS cluster data to Ozone data without - * (or minimal) data moving. - */ -@Command(name = "ozone upgrade", - description = "Convert raw HDFS data to Ozone data without data movement.", - subcommands = { - Plan.class, - Balance.class, - Execute.class, - }, - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class InPlaceUpgrade extends GenericCli { - - public static void main(String[] args) { - new InPlaceUpgrade().run(args); - } -} diff --git a/pom.xml b/pom.xml index 40f2f58634a6..eca71e9e1333 100644 --- a/pom.xml +++ b/pom.xml @@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 4.0.0 org.apache.hadoop hadoop-main-ozone - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Apache Hadoop Ozone Main Apache Hadoop Ozone Main pom @@ -73,16 +73,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${ozone.version} - 0.6.0-SNAPSHOT + 1.1.0-SNAPSHOT Denali ${hdds.version} ${ozone.version} - 0.6.0-6ab75ae-SNAPSHOT + 1.0.0 - 0.4.0 + 0.5.0 apache.snapshots.https Apache Development Snapshot Repository @@ -146,7 +146,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.7.25 1.2.17 - 2.11.0 + 2.13.3 3.4.2 0.7.0 @@ -229,7 +229,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.0-alpha-8 3.0.0 8.19 - 900 + 1200 1.11.615 2.3.4 1.10.0 @@ -260,7 +260,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs info.picocli picocli - 3.9.6 + 4.4.0 jdiff @@ -1332,6 +1332,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hsqldb ${hsqldb.version} + + org.kohsuke.metainf-services + metainf-services + 1.1 + true + io.dropwizard.metrics metrics-core @@ -1615,7 +1621,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs proto-backwards-compatibility ${proto-backwards-compatibility.version} - ${basedir}/src/main/proto/ + ${basedir}/target/classes @@ -2146,22 +2152,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - - freon - - - - org.apache.maven.plugins - maven-surefire-plugin - - - org.apache.hadoop.ozone.freon.** - - - - - - client @@ -2179,23 +2169,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - filesystem-contract - - - - org.apache.maven.plugins - maven-surefire-plugin - - - org.apache.hadoop.fs.ozone.contract.** - - - - - - - - filesystem + filesystem-hdds @@ -2204,26 +2178,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.fs.ozone.** - - - org.apache.hadoop.fs.ozone.contract.** - - - - - - - - hdds-om - - - - org.apache.maven.plugins - maven-surefire-plugin - - org.apache.hadoop.hdds.** - org.apache.hadoop.ozone.om.** @@ -2243,8 +2198,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.ozone.client.** - org.apache.hadoop.ozone.freon.** - org.apache.hadoop.ozone.om.**
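The new org.kohsuke.metainf-services dependency added to the root pom above backs the @MetaInfServices annotations introduced throughout this change: its annotation processor emits the ServiceLoader registration files at compile time, so no hand-written META-INF/services entries are needed for the new subcommands. For illustration, the generated provider-configuration file for SubcommandWithParent in the tools jar would look roughly like the following; the exact class list is inferred only from the classes annotated in this diff:

# META-INF/services/org.apache.hadoop.hdds.cli.SubcommandWithParent
# (generated by the metainf-services annotation processor; inferred content)
org.apache.hadoop.ozone.debug.DBScanner
org.apache.hadoop.ozone.debug.ListTables
org.apache.hadoop.ozone.debug.RDBParser
org.apache.hadoop.ozone.segmentparser.RatisLogParser
org.apache.hadoop.ozone.shell.bucket.BucketCommands
org.apache.hadoop.ozone.shell.keys.KeyCommands
org.apache.hadoop.ozone.shell.token.TokenCommands
org.apache.hadoop.ozone.shell.volume.VolumeCommands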